/*
 * include/trace/ftrace.h as of Linux kernel v3.1
 * (captured from a web code browser; the navigation text and the
 * per-line numbers embedded below are browser artifacts, not part
 * of the kernel source)
 */
  1/*
  2 * Stage 1 of the trace events.
  3 *
  4 * Override the macros in <trace/trace_events.h> to include the following:
  5 *
  6 * struct ftrace_raw_<call> {
  7 *	struct trace_entry		ent;
  8 *	<type>				<item>;
  9 *	<type2>				<item2>[<len>];
 10 *	[...]
 11 * };
 12 *
 13 * The <type> <item> is created by the __field(type, item) macro or
 14 * the __array(type2, item2, len) macro.
 15 * We simply do "type item;", and that will create the fields
 16 * in the structure.
 17 */
 18
 19#include <linux/ftrace_event.h>
 20
 21/*
 22 * DECLARE_EVENT_CLASS can be used to add a generic function
 23 * handlers for events. That is, if all events have the same
 24 * parameters and just have distinct trace points.
 25 * Each tracepoint can be defined with DEFINE_EVENT and that
 26 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 27 *
 28 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 29 */
 30#undef TRACE_EVENT
 31#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
 32	DECLARE_EVENT_CLASS(name,			       \
 33			     PARAMS(proto),		       \
 34			     PARAMS(args),		       \
 35			     PARAMS(tstruct),		       \
 36			     PARAMS(assign),		       \
 37			     PARAMS(print));		       \
 38	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
 39
 40
/*
 * Stage 1 field macros: each expands to a member declaration inside
 * struct ftrace_raw_<call> (built by DECLARE_EVENT_CLASS below).
 */
  41#undef __field
  42#define __field(type, item)		type	item;
  43
  44#undef __field_ext
/* Same layout as __field(); the extra filter_type arg is unused at this stage. */
  45#define __field_ext(type, item, filter_type)	type	item;
  46
  47#undef __array
  48#define __array(type, item, len)	type	item[len];
  49
  50#undef __dynamic_array
/* Variable-length data: only a u32 offset/size word is stored in the
 * struct; the payload lives in the trailing __data[] flexible area. */
  51#define __dynamic_array(type, item, len) u32 __data_loc_##item;
  52
  53#undef __string
/* A string is a dynamic char array; its real length is resolved later. */
  54#define __string(item, src) __dynamic_array(char, item, -1)
  55
  56#undef TP_STRUCT__entry
/* TP_STRUCT__entry() is transparent here: it just pastes its field list. */
  57#define TP_STRUCT__entry(args...) args
 58
 59#undef DECLARE_EVENT_CLASS
 60#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
 61	struct ftrace_raw_##name {					\
 62		struct trace_entry	ent;				\
 63		tstruct							\
 64		char			__data[0];			\
 65	};								\
 66									\
 67	static struct ftrace_event_class event_class_##name;
 68
 69#undef DEFINE_EVENT
 70#define DEFINE_EVENT(template, name, proto, args)	\
 71	static struct ftrace_event_call	__used		\
 72	__attribute__((__aligned__(4))) event_##name
 73
 
 
 
 
 74#undef DEFINE_EVENT_PRINT
 75#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 76	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 77
 78/* Callbacks are meaningless to ftrace. */
 79#undef TRACE_EVENT_FN
 80#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
 81		assign, print, reg, unreg)				\
 82	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\
 83		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\
 84
 85#undef TRACE_EVENT_FLAGS
 86#define TRACE_EVENT_FLAGS(name, value)					\
 87	__TRACE_EVENT_FLAGS(name, value)
 88
 
 
 
 
 89#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 90
 91
 92/*
 93 * Stage 2 of the trace events.
 94 *
 95 * Include the following:
 96 *
 97 * struct ftrace_data_offsets_<call> {
 98 *	u32				<item1>;
 99 *	u32				<item2>;
100 *	[...]
101 * };
102 *
103 * The __dynamic_array() macro will create each u32 <item>, this is
104 * to keep the offset of each array from the beginning of the event.
105 * The size of an array is also encoded, in the higher 16 bits of <item>.
106 */
107
108#undef __field
109#define __field(type, item)
110
111#undef __field_ext
112#define __field_ext(type, item, filter_type)
113
114#undef __array
115#define __array(type, item, len)
116
117#undef __dynamic_array
118#define __dynamic_array(type, item, len)	u32 item;
119
120#undef __string
121#define __string(item, src) __dynamic_array(char, item, -1)
122
123#undef DECLARE_EVENT_CLASS
124#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
125	struct ftrace_data_offsets_##call {				\
126		tstruct;						\
127	};
128
129#undef DEFINE_EVENT
130#define DEFINE_EVENT(template, name, proto, args)
131
132#undef DEFINE_EVENT_PRINT
133#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
134	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
135
136#undef TRACE_EVENT_FLAGS
137#define TRACE_EVENT_FLAGS(event, flag)
138
 
 
 
139#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
140
141/*
142 * Stage 3 of the trace events.
143 *
144 * Override the macros in <trace/trace_events.h> to include the following:
145 *
146 * enum print_line_t
147 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
148 * {
149 *	struct trace_seq *s = &iter->seq;
150 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
151 *	struct trace_entry *entry;
152 *	struct trace_seq *p = &iter->tmp_seq;
153 *	int ret;
154 *
155 *	entry = iter->ent;
156 *
157 *	if (entry->type != event_<call>->event.type) {
158 *		WARN_ON_ONCE(1);
159 *		return TRACE_TYPE_UNHANDLED;
160 *	}
161 *
162 *	field = (typeof(field))entry;
163 *
164 *	trace_seq_init(p);
165 *	ret = trace_seq_printf(s, "%s: ", <call>);
166 *	if (ret)
167 *		ret = trace_seq_printf(s, <TP_printk> "\n");
168 *	if (!ret)
169 *		return TRACE_TYPE_PARTIAL_LINE;
170 *
171 *	return TRACE_TYPE_HANDLED;
172 * }
173 *
174 * This is the method used to print the raw event to the trace
175 * output format. Note, this is not needed if the data is read
176 * in binary.
177 */
178
179#undef __entry
180#define __entry field
181
182#undef TP_printk
183#define TP_printk(fmt, args...) fmt "\n", args
184
185#undef __get_dynamic_array
186#define __get_dynamic_array(field)	\
187		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
188
189#undef __get_str
190#define __get_str(field) (char *)__get_dynamic_array(field)
191
192#undef __print_flags
193#define __print_flags(flag, delim, flag_array...)			\
194	({								\
195		static const struct trace_print_flags __flags[] =	\
196			{ flag_array, { -1, NULL }};			\
197		ftrace_print_flags_seq(p, delim, flag, __flags);	\
198	})
199
200#undef __print_symbolic
201#define __print_symbolic(value, symbol_array...)			\
202	({								\
203		static const struct trace_print_flags symbols[] =	\
204			{ symbol_array, { -1, NULL }};			\
205		ftrace_print_symbols_seq(p, value, symbols);		\
206	})
207
208#undef __print_symbolic_u64
209#if BITS_PER_LONG == 32
210#define __print_symbolic_u64(value, symbol_array...)			\
211	({								\
212		static const struct trace_print_flags_u64 symbols[] =	\
213			{ symbol_array, { -1, NULL } };			\
214		ftrace_print_symbols_seq_u64(p, value, symbols);	\
215	})
216#else
217#define __print_symbolic_u64(value, symbol_array...)			\
218			__print_symbolic(value, symbol_array)
219#endif
220
221#undef __print_hex
222#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
223
/*
 * Stage 3 worker, generated once per event class:
 *
 * ftrace_raw_output_<call>() formats one ring-buffer entry as text when
 * the trace file is read.  It recovers the ftrace_event_call from the
 * embedded trace_event (container_of), sanity-checks the entry type
 * against the registered event type, then prints "<name>: " followed by
 * the TP_printk() expansion.  A failed trace_seq_printf() (seq buffer
 * full) is reported as TRACE_TYPE_PARTIAL_LINE.
 *
 * NOTE(review): the unnumbered blank lines inside this macro are an
 * artifact of the capture; in the real source every line of the
 * definition ends with a continuation backslash.
 */
 224#undef DECLARE_EVENT_CLASS
 225#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 226static notrace enum print_line_t					\
 227ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 228			 struct trace_event *trace_event)		\
 229{									\
 230	struct ftrace_event_call *event;				\
 231	struct trace_seq *s = &iter->seq;				\

 232	struct ftrace_raw_##call *field;				\
 233	struct trace_entry *entry;					\
 234	struct trace_seq *p = &iter->tmp_seq;				\
 235	int ret;							\
 236									\
 237	event = container_of(trace_event, struct ftrace_event_call,	\
 238			     event);					\
 239									\
 240	entry = iter->ent;						\
 241									\
 242	if (entry->type != event->event.type) {				\
 243		WARN_ON_ONCE(1);					\
 244		return TRACE_TYPE_UNHANDLED;				\
 245	}								\
 246									\
 247	field = (typeof(field))entry;					\
 248									\
 249	trace_seq_init(p);						\
 250	ret = trace_seq_printf(s, "%s: ", event->name);			\
 251	if (ret)							\
 252		ret = trace_seq_printf(s, print);			\


 253	if (!ret)							\
 254		return TRACE_TYPE_PARTIAL_LINE;				\
 255									\
 256	return TRACE_TYPE_HANDLED;					\
 257}									\
 258static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
 259	.trace			= ftrace_raw_output_##call,		\
 260};
261
262#undef DEFINE_EVENT_PRINT
263#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
264static notrace enum print_line_t					\
265ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
266			 struct trace_event *event)			\
267{									\
268	struct trace_seq *s = &iter->seq;				\
269	struct ftrace_raw_##template *field;				\
270	struct trace_entry *entry;					\
271	struct trace_seq *p = &iter->tmp_seq;				\
272	int ret;							\
273									\
274	entry = iter->ent;						\
275									\
276	if (entry->type != event_##call.event.type) {			\
277		WARN_ON_ONCE(1);					\
278		return TRACE_TYPE_UNHANDLED;				\
279	}								\
280									\
281	field = (typeof(field))entry;					\
282									\
283	trace_seq_init(p);						\
284	ret = trace_seq_printf(s, "%s: ", #call);			\
285	if (ret)							\
286		ret = trace_seq_printf(s, print);			\
287	if (!ret)							\
288		return TRACE_TYPE_PARTIAL_LINE;				\
289									\
290	return TRACE_TYPE_HANDLED;					\
291}									\
292static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
293	.trace			= ftrace_raw_output_##call,		\
294};
295
296#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
297
298#undef __field_ext
299#define __field_ext(type, item, filter_type)				\
300	ret = trace_define_field(event_call, #type, #item,		\
301				 offsetof(typeof(field), item),		\
302				 sizeof(field.item),			\
303				 is_signed_type(type), filter_type);	\
304	if (ret)							\
305		return ret;
306
307#undef __field
308#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)
309
310#undef __array
311#define __array(type, item, len)					\
312	do {								\
313		mutex_lock(&event_storage_mutex);			\
314		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
315		snprintf(event_storage, sizeof(event_storage),		\
316			 "%s[%d]", #type, len);				\
317		ret = trace_define_field(event_call, event_storage, #item, \
318				 offsetof(typeof(field), item),		\
319				 sizeof(field.item),			\
320				 is_signed_type(type), FILTER_OTHER);	\
321		mutex_unlock(&event_storage_mutex);			\
322		if (ret)						\
323			return ret;					\
324	} while (0);
325
326#undef __dynamic_array
327#define __dynamic_array(type, item, len)				       \
328	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
329				 offsetof(typeof(field), __data_loc_##item),   \
330				 sizeof(field.__data_loc_##item),	       \
331				 is_signed_type(type), FILTER_OTHER);
332
333#undef __string
334#define __string(item, src) __dynamic_array(char, item, -1)
335
/*
 * Emit ftrace_define_fields_<call>(): registers every field declared
 * in TP_STRUCT__entry with the event filter core.  Each __field /
 * __array / __dynamic_array expansion of 'tstruct' assigns to 'ret'
 * (and __field/__array early-return on error); the value of the last
 * registration is what the function returns.
 *
 * Fix: 'ret' is initialised to 0 so an event class whose tstruct
 * registers no fields returns a defined success value instead of
 * reading an uninitialised variable (undefined behaviour).
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}
348
349#undef DEFINE_EVENT
350#define DEFINE_EVENT(template, name, proto, args)
351
352#undef DEFINE_EVENT_PRINT
353#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
354	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
355
356#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
357
358/*
359 * remember the offset of each array from the beginning of the event.
360 */
361
362#undef __entry
363#define __entry entry
364
365#undef __field
366#define __field(type, item)
367
368#undef __field_ext
369#define __field_ext(type, item, filter_type)
370
371#undef __array
372#define __array(type, item, len)
373
374#undef __dynamic_array
375#define __dynamic_array(type, item, len)				\
 
376	__data_offsets->item = __data_size +				\
377			       offsetof(typeof(*entry), __data);	\
378	__data_offsets->item |= (len * sizeof(type)) << 16;		\
379	__data_size += (len) * sizeof(type);
380
381#undef __string
382#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
 
383
384#undef DECLARE_EVENT_CLASS
385#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
386static inline notrace int ftrace_get_offsets_##call(			\
387	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
388{									\
389	int __data_size = 0;						\
 
390	struct ftrace_raw_##call __maybe_unused *entry;			\
391									\
392	tstruct;							\
393									\
394	return __data_size;						\
395}
396
397#undef DEFINE_EVENT
398#define DEFINE_EVENT(template, name, proto, args)
399
400#undef DEFINE_EVENT_PRINT
401#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
402	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
403
404#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
405
406/*
407 * Stage 4 of the trace events.
408 *
409 * Override the macros in <trace/trace_events.h> to include the following:
410 *
411 * For those macros defined with TRACE_EVENT:
412 *
413 * static struct ftrace_event_call event_<call>;
414 *
415 * static void ftrace_raw_event_<call>(void *__data, proto)
416 * {
417 *	struct ftrace_event_call *event_call = __data;
 
418 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 
 
419 *	struct ring_buffer_event *event;
420 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
421 *	struct ring_buffer *buffer;
422 *	unsigned long irq_flags;
423 *	int __data_size;
424 *	int pc;
425 *
 
 
 
 
 
 
 
426 *	local_save_flags(irq_flags);
427 *	pc = preempt_count();
428 *
429 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
430 *
431 *	event = trace_current_buffer_lock_reserve(&buffer,
432 *				  event_<call>->event.type,
433 *				  sizeof(*entry) + __data_size,
434 *				  irq_flags, pc);
435 *	if (!event)
436 *		return;
437 *	entry	= ring_buffer_event_data(event);
438 *
439 *	{ <assign>; }  <-- Here we assign the entries by the __field and
440 *			   __array macros.
441 *
442 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
443 *		trace_current_buffer_unlock_commit(buffer,
444 *						   event, irq_flags, pc);
 
 
 
 
 
 
 
 
445 * }
446 *
447 * static struct trace_event ftrace_event_type_<call> = {
448 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
449 * };
450 *
451 * static const char print_fmt_<call>[] = <TP_printk>;
452 *
453 * static struct ftrace_event_class __used event_class_<template> = {
454 *	.system			= "<system>",
455 *	.define_fields		= ftrace_define_fields_<call>,
456 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
457 *	.raw_init		= trace_event_raw_init,
458 *	.probe			= ftrace_raw_event_##call,
459 *	.reg			= ftrace_event_reg,
460 * };
461 *
462 * static struct ftrace_event_call event_<call> = {
463 *	.name			= "<call>",
464 *	.class			= event_class_<template>,
 
 
 
465 *	.event			= &ftrace_event_type_<call>,
466 *	.print_fmt		= print_fmt_<call>,
 
467 * };
468 * // its only safe to use pointers when doing linker tricks to
469 * // create an array.
470 * static struct ftrace_event_call __used
471 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
472 *
473 */
474
475#ifdef CONFIG_PERF_EVENTS
476
477#define _TRACE_PERF_PROTO(call, proto)					\
478	static notrace void						\
479	perf_trace_##call(void *__data, proto);
480
481#define _TRACE_PERF_INIT(call)						\
482	.perf_probe		= perf_trace_##call,
483
484#else
485#define _TRACE_PERF_PROTO(call, proto)
486#define _TRACE_PERF_INIT(call)
487#endif /* CONFIG_PERF_EVENTS */
488
489#undef __entry
490#define __entry entry
491
492#undef __field
493#define __field(type, item)
494
495#undef __array
496#define __array(type, item, len)
497
498#undef __dynamic_array
499#define __dynamic_array(type, item, len)				\
500	__entry->__data_loc_##item = __data_offsets.item;
501
/*
 * Map __string() to a dynamic char array at assign time; the actual
 * string bytes are copied later by __assign_str().
 *
 * Fix: dropped the stray trailing backslash after the expansion — it
 * spliced the following (blank) line into this macro definition, which
 * is fragile: any text later added on that line would silently become
 * part of every __string() expansion.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

505#undef __assign_str
506#define __assign_str(dst, src)						\
507	strcpy(__get_str(dst), src);
508
509#undef TP_fast_assign
510#define TP_fast_assign(args...) args
511
512#undef TP_perf_assign
513#define TP_perf_assign(args...)
 
 
 
 
 
 
514
/*
 * Stage 4 probe, generated once per event class:
 *
 * ftrace_raw_event_<call>() is the tracepoint probe.  It captures IRQ
 * flags and the preempt count (for the entry header), sizes the
 * dynamic-array payload via ftrace_get_offsets_<call>(), reserves
 * sizeof(*entry) + __data_size bytes in the ring buffer, lets the
 * __dynamic_array stage ('tstruct') and then TP_fast_assign ('assign')
 * fill the entry, and commits it unless the event filter discards it.
 */
 515#undef DECLARE_EVENT_CLASS
 516#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 517									\
 518static notrace void							\
 519ftrace_raw_event_##call(void *__data, proto)				\
 520{									\
 521	struct ftrace_event_call *event_call = __data;			\
 522	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 523	struct ring_buffer_event *event;				\
 524	struct ftrace_raw_##call *entry;				\
 525	struct ring_buffer *buffer;					\
 526	unsigned long irq_flags;					\
 527	int __data_size;						\
 528	int pc;								\
 529									\
 530	local_save_flags(irq_flags);					\
 531	pc = preempt_count();						\
 532									\
 533	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 534									\
 535	event = trace_current_buffer_lock_reserve(&buffer,		\
 536				 event_call->event.type,		\
 537				 sizeof(*entry) + __data_size,		\
 538				 irq_flags, pc);			\
 539	if (!event)							\
 540		return;							\
 541	entry	= ring_buffer_event_data(event);			\
 542									\
 543	tstruct								\
 544									\
 545	{ assign; }							\
 546									\
 547	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
 548		trace_nowake_buffer_unlock_commit(buffer,		\
 549						  event, irq_flags, pc); \
 550}
551/*
552 * The ftrace_test_probe is compiled out, it is only here as a build time check
553 * to make sure that if the tracepoint handling changes, the ftrace probe will
554 * fail to compile unless it too is updated.
555 */
556
557#undef DEFINE_EVENT
558#define DEFINE_EVENT(template, call, proto, args)			\
559static inline void ftrace_test_probe_##call(void)			\
560{									\
561	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
562}
563
564#undef DEFINE_EVENT_PRINT
565#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
566
567#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
568
569#undef __entry
570#define __entry REC
571
572#undef __print_flags
573#undef __print_symbolic
 
574#undef __get_dynamic_array
575#undef __get_str
576
577#undef TP_printk
578#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
579
580#undef DECLARE_EVENT_CLASS
581#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
582_TRACE_PERF_PROTO(call, PARAMS(proto));					\
583static const char print_fmt_##call[] = print;				\
584static struct ftrace_event_class __used event_class_##call = {		\
585	.system			= __stringify(TRACE_SYSTEM),		\
586	.define_fields		= ftrace_define_fields_##call,		\
587	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
588	.raw_init		= trace_event_raw_init,			\
589	.probe			= ftrace_raw_event_##call,		\
590	.reg			= ftrace_event_reg,			\
591	_TRACE_PERF_INIT(call)						\
592};
593
594#undef DEFINE_EVENT
595#define DEFINE_EVENT(template, call, proto, args)			\
596									\
597static struct ftrace_event_call __used event_##call = {			\
598	.name			= #call,				\
599	.class			= &event_class_##template,		\
 
 
 
600	.event.funcs		= &ftrace_event_type_funcs_##template,	\
601	.print_fmt		= print_fmt_##template,			\
 
602};									\
603static struct ftrace_event_call __used					\
604__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
605
606#undef DEFINE_EVENT_PRINT
607#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
608									\
609static const char print_fmt_##call[] = print;				\
610									\
611static struct ftrace_event_call __used event_##call = {			\
612	.name			= #call,				\
613	.class			= &event_class_##template,		\
 
 
 
614	.event.funcs		= &ftrace_event_type_funcs_##call,	\
615	.print_fmt		= print_fmt_##call,			\
 
616};									\
617static struct ftrace_event_call __used					\
618__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
619
620#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
621
622/*
623 * Define the insertion callback to perf events
624 *
625 * The job is very similar to ftrace_raw_event_<call> except that we don't
626 * insert in the ring buffer but in a perf counter.
627 *
628 * static void ftrace_perf_<call>(proto)
629 * {
630 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
631 *	struct ftrace_event_call *event_call = &event_<call>;
632 *	extern void perf_tp_event(int, u64, u64, void *, int);
633 *	struct ftrace_raw_##call *entry;
634 *	struct perf_trace_buf *trace_buf;
635 *	u64 __addr = 0, __count = 1;
636 *	unsigned long irq_flags;
637 *	struct trace_entry *ent;
638 *	int __entry_size;
639 *	int __data_size;
640 *	int __cpu
641 *	int pc;
642 *
643 *	pc = preempt_count();
644 *
645 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
646 *
647 *	// Below we want to get the aligned size by taking into account
648 *	// the u32 field that will later store the buffer size
649 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
650 *			     sizeof(u64));
651 *	__entry_size -= sizeof(u32);
652 *
653 *	// Protect the non nmi buffer
654 *	// This also protects the rcu read side
655 *	local_irq_save(irq_flags);
656 *	__cpu = smp_processor_id();
657 *
658 *	if (in_nmi())
659 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
660 *	else
661 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
662 *
663 *	if (!trace_buf)
664 *		goto end;
665 *
666 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
667 *
668 * 	// Avoid recursion from perf that could mess up the buffer
669 * 	if (trace_buf->recursion++)
670 *		goto end_recursion;
671 *
672 * 	raw_data = trace_buf->buf;
673 *
674 *	// Make recursion update visible before entering perf_tp_event
675 *	// so that we protect from perf recursions.
676 *
677 *	barrier();
678 *
679 *	//zero dead bytes from alignment to avoid stack leak to userspace:
680 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
681 *	entry = (struct ftrace_raw_<call> *)raw_data;
682 *	ent = &entry->ent;
683 *	tracing_generic_entry_update(ent, irq_flags, pc);
684 *	ent->type = event_call->id;
685 *
686 *	<tstruct> <- do some jobs with dynamic arrays
687 *
688 *	<assign>  <- affect our values
689 *
690 *	perf_tp_event(event_call->id, __addr, __count, entry,
691 *		     __entry_size);  <- submit them to perf counter
692 *
693 * }
694 */
695
696#ifdef CONFIG_PERF_EVENTS
697
698#undef __entry
699#define __entry entry
700
701#undef __get_dynamic_array
702#define __get_dynamic_array(field)	\
703		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
704
705#undef __get_str
706#define __get_str(field) (char *)__get_dynamic_array(field)
707
708#undef __perf_addr
709#define __perf_addr(a) __addr = (a)
710
711#undef __perf_count
712#define __perf_count(c) __count = (c)
 
 
 
713
714#undef DECLARE_EVENT_CLASS
715#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
716static notrace void							\
717perf_trace_##call(void *__data, proto)					\
718{									\
719	struct ftrace_event_call *event_call = __data;			\
720	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
721	struct ftrace_raw_##call *entry;				\
722	struct pt_regs __regs;						\
723	u64 __addr = 0, __count = 1;					\
 
724	struct hlist_head *head;					\
725	int __entry_size;						\
726	int __data_size;						\
727	int rctx;							\
728									\
729	perf_fetch_caller_regs(&__regs);				\
730									\
731	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 
 
 
 
 
 
732	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
733			     sizeof(u64));				\
734	__entry_size -= sizeof(u32);					\
735									\
736	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
737		      "profile buffer not large enough"))		\
738		return;							\
739									\
740	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
741		__entry_size, event_call->event.type, &__regs, &rctx);	\
742	if (!entry)							\
743		return;							\
744									\
745	tstruct								\
746									\
747	{ assign; }							\
748									\
749	head = this_cpu_ptr(event_call->perf_events);			\
750	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
751		__count, &__regs, head);				\
752}
753
754/*
755 * This part is compiled out, it is only here as a build time check
756 * to make sure that if the tracepoint handling changes, the
757 * perf probe will fail to compile unless it too is updated.
758 */
759#undef DEFINE_EVENT
760#define DEFINE_EVENT(template, call, proto, args)			\
761static inline void perf_test_probe_##call(void)				\
762{									\
763	check_trace_callback_type_##call(perf_trace_##template);	\
764}
765
766
767#undef DEFINE_EVENT_PRINT
768#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
769	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
770
771#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
772#endif /* CONFIG_PERF_EVENTS */
773
774#undef _TRACE_PROFILE_INIT
775
/* ===== second capture: the same header as of Linux kernel v3.15 ===== */
  1/*
  2 * Stage 1 of the trace events.
  3 *
  4 * Override the macros in <trace/trace_events.h> to include the following:
  5 *
  6 * struct ftrace_raw_<call> {
  7 *	struct trace_entry		ent;
  8 *	<type>				<item>;
  9 *	<type2>				<item2>[<len>];
 10 *	[...]
 11 * };
 12 *
 13 * The <type> <item> is created by the __field(type, item) macro or
 14 * the __array(type2, item2, len) macro.
 15 * We simply do "type item;", and that will create the fields
 16 * in the structure.
 17 */
 18
 19#include <linux/ftrace_event.h>
 20
 21/*
 22 * DECLARE_EVENT_CLASS can be used to add a generic function
 23 * handlers for events. That is, if all events have the same
 24 * parameters and just have distinct trace points.
 25 * Each tracepoint can be defined with DEFINE_EVENT and that
 26 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 27 *
 28 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 29 */
 30#undef TRACE_EVENT
 31#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
 32	DECLARE_EVENT_CLASS(name,			       \
 33			     PARAMS(proto),		       \
 34			     PARAMS(args),		       \
 35			     PARAMS(tstruct),		       \
 36			     PARAMS(assign),		       \
 37			     PARAMS(print));		       \
 38	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
 39
 40
 41#undef __field
 42#define __field(type, item)		type	item;
 43
 44#undef __field_ext
 45#define __field_ext(type, item, filter_type)	type	item;
 46
 47#undef __array
 48#define __array(type, item, len)	type	item[len];
 49
 50#undef __dynamic_array
 51#define __dynamic_array(type, item, len) u32 __data_loc_##item;
 52
 53#undef __string
 54#define __string(item, src) __dynamic_array(char, item, -1)
 55
 56#undef TP_STRUCT__entry
 57#define TP_STRUCT__entry(args...) args
 58
 59#undef DECLARE_EVENT_CLASS
 60#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
 61	struct ftrace_raw_##name {					\
 62		struct trace_entry	ent;				\
 63		tstruct							\
 64		char			__data[0];			\
 65	};								\
 66									\
 67	static struct ftrace_event_class event_class_##name;
 68
 69#undef DEFINE_EVENT
 70#define DEFINE_EVENT(template, name, proto, args)	\
 71	static struct ftrace_event_call	__used		\
 72	__attribute__((__aligned__(4))) event_##name
 73
/* DEFINE_EVENT_FN: the reg/unreg callbacks are dropped here — they are
 * forwarded straight to DEFINE_EVENT without them. */
  74#undef DEFINE_EVENT_FN
  75#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
  76	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
  77
/* DEFINE_EVENT_PRINT: the custom print format is ignored at this stage. */
  78#undef DEFINE_EVENT_PRINT
  79#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
  80	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
  81
  82/* Callbacks are meaningless to ftrace. */
  83#undef TRACE_EVENT_FN
  84#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
  85		assign, print, reg, unreg)				\
  86	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\
  87		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\
  88
  89#undef TRACE_EVENT_FLAGS
  90#define TRACE_EVENT_FLAGS(name, value)					\
  91	__TRACE_EVENT_FLAGS(name, value)
  92
/* New in this version: per-event perf permission hook, forwarded to the
 * __TRACE_EVENT_PERF_PERM() helper (defined elsewhere). */
  93#undef TRACE_EVENT_PERF_PERM
  94#define TRACE_EVENT_PERF_PERM(name, expr...)				\
  95	__TRACE_EVENT_PERF_PERM(name, expr)
 96
 97#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 98
 99
100/*
101 * Stage 2 of the trace events.
102 *
103 * Include the following:
104 *
105 * struct ftrace_data_offsets_<call> {
106 *	u32				<item1>;
107 *	u32				<item2>;
108 *	[...]
109 * };
110 *
111 * The __dynamic_array() macro will create each u32 <item>, this is
112 * to keep the offset of each array from the beginning of the event.
113 * The size of an array is also encoded, in the higher 16 bits of <item>.
114 */
115
116#undef __field
117#define __field(type, item)
118
119#undef __field_ext
120#define __field_ext(type, item, filter_type)
121
122#undef __array
123#define __array(type, item, len)
124
125#undef __dynamic_array
126#define __dynamic_array(type, item, len)	u32 item;
127
128#undef __string
129#define __string(item, src) __dynamic_array(char, item, -1)
130
131#undef DECLARE_EVENT_CLASS
132#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
133	struct ftrace_data_offsets_##call {				\
134		tstruct;						\
135	};
136
137#undef DEFINE_EVENT
138#define DEFINE_EVENT(template, name, proto, args)
139
140#undef DEFINE_EVENT_PRINT
141#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
142	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
143
144#undef TRACE_EVENT_FLAGS
145#define TRACE_EVENT_FLAGS(event, flag)
146
147#undef TRACE_EVENT_PERF_PERM
148#define TRACE_EVENT_PERF_PERM(event, expr...)
149
150#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
151
152/*
153 * Stage 3 of the trace events.
154 *
155 * Override the macros in <trace/trace_events.h> to include the following:
156 *
157 * enum print_line_t
158 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
159 * {
160 *	struct trace_seq *s = &iter->seq;
161 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
162 *	struct trace_entry *entry;
163 *	struct trace_seq *p = &iter->tmp_seq;
164 *	int ret;
165 *
166 *	entry = iter->ent;
167 *
168 *	if (entry->type != event_<call>->event.type) {
169 *		WARN_ON_ONCE(1);
170 *		return TRACE_TYPE_UNHANDLED;
171 *	}
172 *
173 *	field = (typeof(field))entry;
174 *
175 *	trace_seq_init(p);
176 *	ret = trace_seq_printf(s, "%s: ", <call>);
177 *	if (ret)
178 *		ret = trace_seq_printf(s, <TP_printk> "\n");
179 *	if (!ret)
180 *		return TRACE_TYPE_PARTIAL_LINE;
181 *
182 *	return TRACE_TYPE_HANDLED;
183 * }
184 *
185 * This is the method used to print the raw event to the trace
186 * output format. Note, this is not needed if the data is read
187 * in binary.
188 */
189
190#undef __entry
191#define __entry field
192
193#undef TP_printk
194#define TP_printk(fmt, args...) fmt "\n", args
195
196#undef __get_dynamic_array
197#define __get_dynamic_array(field)	\
198		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
199
200#undef __get_str
201#define __get_str(field) (char *)__get_dynamic_array(field)
202
203#undef __print_flags
204#define __print_flags(flag, delim, flag_array...)			\
205	({								\
206		static const struct trace_print_flags __flags[] =	\
207			{ flag_array, { -1, NULL }};			\
208		ftrace_print_flags_seq(p, delim, flag, __flags);	\
209	})
210
211#undef __print_symbolic
212#define __print_symbolic(value, symbol_array...)			\
213	({								\
214		static const struct trace_print_flags symbols[] =	\
215			{ symbol_array, { -1, NULL }};			\
216		ftrace_print_symbols_seq(p, value, symbols);		\
217	})
218
219#undef __print_symbolic_u64
220#if BITS_PER_LONG == 32
221#define __print_symbolic_u64(value, symbol_array...)			\
222	({								\
223		static const struct trace_print_flags_u64 symbols[] =	\
224			{ symbol_array, { -1, NULL } };			\
225		ftrace_print_symbols_seq_u64(p, value, symbols);	\
226	})
227#else
228#define __print_symbolic_u64(value, symbol_array...)			\
229			__print_symbolic(value, symbol_array)
230#endif
231
232#undef __print_hex
233#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
234
/*
 * Stage 2: emit ftrace_raw_output_<call>(), the .trace callback that
 * renders a binary ring-buffer record as text.  ftrace_raw_output_prep()
 * validates the event type and prints the "<name>: " prefix; the
 * TP_printk() format (already rewritten above) does the rest.
 * trace_seq_printf() returns 0 when the seq buffer is full, which maps
 * to TRACE_TYPE_PARTIAL_LINE.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};
261
/*
 * Same as above, but for DEFINE_EVENT_PRINT(): the record layout comes
 * from the template class while the print format is per-event, so the
 * event type must be checked explicitly before the cast.
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return ftrace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};
287
288#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
289
/*
 * Stage 3 field macros: each expands (inside ftrace_define_fields_<call>,
 * where "field" is a dummy record and "ret"/"event_call" are in scope)
 * to a trace_define_field() call registering the field's name, type,
 * offset, size and signedness with the filter/format machinery.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

/*
 * Arrays additionally build a "type[len]" type string and enforce the
 * filter-string size limit at compile time.
 */
#undef __array
#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

/* Dynamic arrays register their 32-bit __data_loc_<item> descriptor. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
324
/*
 * Stage 3: emit ftrace_define_fields_<call>(), which registers every
 * field of the event with the filter/format machinery.  "field" is a
 * dummy instance used only for offsetof()/sizeof() by the __field*
 * macros that tstruct expands to.
 *
 * Initialize ret to 0: if tstruct expands to no field macros at all,
 * the original code returned an uninitialized value (undefined
 * behavior); with fields present, behavior is unchanged since every
 * field macro assigns ret before it is returned.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}
337
/*
 * Stage 3 only needs one define_fields function per class, so the
 * per-event macros expand to nothing.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
344
345#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
346
347/*
348 * remember the offset of each array from the beginning of the event.
349 */
350
#undef __entry
#define __entry entry

/* Fixed-size fields contribute nothing to the dynamic-data size. */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/*
 * Pack the dynamic item's descriptor: offset of its payload from the
 * start of the entry in the low 16 bits, byte length in the high 16,
 * and grow the running total of dynamic data.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

/* Strings reserve strlen+1 bytes; a NULL src is stored as "(null)". */
#undef __string
#define __string(item, src) __dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * Emit ftrace_get_offsets_<call>(): fills in the per-item offsets and
 * returns the total number of bytes of dynamic data to reserve beyond
 * sizeof(struct ftrace_raw_<call>).  "entry" is never dereferenced;
 * it only feeds offsetof()/typeof() in the macros above.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)       \
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}
388
/*
 * The offsets helper is also per-class, so the per-event macros again
 * expand to nothing.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
395
396#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
397
398/*
399 * Stage 4 of the trace events.
400 *
401 * Override the macros in <trace/trace_events.h> to include the following:
402 *
403 * For those macros defined with TRACE_EVENT:
404 *
405 * static struct ftrace_event_call event_<call>;
406 *
407 * static void ftrace_raw_event_<call>(void *__data, proto)
408 * {
409 *	struct ftrace_event_file *ftrace_file = __data;
410 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
411 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
412 *	unsigned long eflags = ftrace_file->flags;
413 *	enum event_trigger_type __tt = ETT_NONE;
414 *	struct ring_buffer_event *event;
415 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
416 *	struct ring_buffer *buffer;
417 *	unsigned long irq_flags;
418 *	int __data_size;
419 *	int pc;
420 *
421 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
422 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
423 *			event_triggers_call(ftrace_file, NULL);
424 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
425 *			return;
426 *	}
427 *
428 *	local_save_flags(irq_flags);
429 *	pc = preempt_count();
430 *
431 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
432 *
433 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
434 *				  event_<call>->event.type,
435 *				  sizeof(*entry) + __data_size,
436 *				  irq_flags, pc);
437 *	if (!event)
438 *		return;
439 *	entry	= ring_buffer_event_data(event);
440 *
441 *	{ <assign>; }  <-- Here we assign the entries by the __field and
442 *			   __array macros.
443 *
444 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
445 *		__tt = event_triggers_call(ftrace_file, entry);
446 *
447 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
448 *		     &ftrace_file->flags))
449 *		ring_buffer_discard_commit(buffer, event);
450 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
451 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
452 *
453 *	if (__tt)
454 *		event_triggers_post_call(ftrace_file, __tt);
455 * }
456 *
457 * static struct trace_event ftrace_event_type_<call> = {
458 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
459 * };
460 *
461 * static const char print_fmt_<call>[] = <TP_printk>;
462 *
463 * static struct ftrace_event_class __used event_class_<template> = {
464 *	.system			= "<system>",
465 *	.define_fields		= ftrace_define_fields_<call>,
466 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
467 *	.raw_init		= trace_event_raw_init,
468 *	.probe			= ftrace_raw_event_##call,
469 *	.reg			= ftrace_event_reg,
470 * };
471 *
472 * static struct ftrace_event_call event_<call> = {
 
473 *	.class			= event_class_<template>,
474 *	{
475 *		.tp			= &__tracepoint_<call>,
476 *	},
477 *	.event			= &ftrace_event_type_<call>,
478 *	.print_fmt		= print_fmt_<call>,
479 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
480 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
483 * static struct ftrace_event_call __used
484 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
485 *
486 */
487
/*
 * With perf enabled, each event class also gets a perf_trace_<call>()
 * probe: declare its prototype here and hook it up via .perf_probe.
 * Without CONFIG_PERF_EVENTS both macros expand to nothing.
 */
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */
501
/*
 * Stage 4 (TP_fast_assign): the reserved ring-buffer record is held in
 * the local "entry", so __entry resolves to it.  Fixed-size fields are
 * written directly by the assign block and need no macro expansion.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/*
 * Copy the packed offset/length word computed by
 * ftrace_get_offsets_<call>() into the record's descriptor.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;
514
/*
 * A string's descriptor is assigned like any other dynamic array; the
 * actual bytes are copied by __assign_str() in the assign block.
 *
 * Fix: the definition carried a stray trailing '\' line continuation,
 * silently splicing the following (blank) line into the macro body.
 * Harmless only by accident — drop it.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
/* Copy the source string into the reserved dynamic-array slot;
 * a NULL src is stored as the literal "(null)", matching the length
 * reserved by __string() in the offsets stage. */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/*
 * In the ftrace probe the __perf_* wrappers are transparent; the perf
 * probe later redefines them to capture addr/count/task.
 */
#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)
533
/*
 * Stage 4: emit ftrace_raw_event_<call>(), the tracepoint probe that
 * records the event.  It bails out early if triggers/soft-disable say
 * so, sizes the record (fixed struct + dynamic data), reserves ring
 * buffer space, runs the tstruct descriptor assignments and the
 * TP_fast_assign() block, then commits the buffer (which also handles
 * filtering and post-commit triggers).
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_buffer fbuffer;				\
	struct ftrace_raw_##call *entry;				\
	int __data_size;						\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	ftrace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

/* Type-check the class probe against the tracepoint's signature. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
578
579#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
580
/*
 * Final stage: build the print_fmt_<call> string exposed through each
 * event's "format" file.  __entry becomes the literal "REC" and the
 * output helpers are undefined so TP_printk() stringifies verbatim.
 */
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

/*
 * Emit the per-class event_class_<call>, tying together the stage-3
 * field definitions, the stage-4 probe, and (if configured) the perf
 * probe.  __refdata: .probe may reference __init/__exit-ish code.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};
606
/*
 * Emit the per-event ftrace_event_call, inheriting class, output
 * functions and print format from the template, and place a pointer
 * to it in the "_ftrace_events" linker section so the core can
 * enumerate all events as an array at boot.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
621
/*
 * Like DEFINE_EVENT, but the event supplies its own print format and
 * output functions (generated in stage 2 under the event's own name)
 * rather than inheriting the template's.
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
638
639#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
640
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
641
642#ifdef CONFIG_PERF_EVENTS
643
/*
 * Perf stage: __entry again names the local record, and the data-loc
 * helpers are restored so assign blocks work as in the ftrace probe.
 */
#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/*
 * Unlike the ftrace probe's pass-through versions, these capture the
 * value into perf_trace_<call>()'s locals (__addr, __count, __task)
 * so it can be forwarded to perf_trace_buf_submit().
 */
#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
662
/*
 * Emit perf_trace_<call>(), the perf variant of the probe.  It skips
 * work when no per-CPU perf event is attached (unless a __perf_task()
 * target may be set), rounds the record size so the trailing u32
 * perf header padding keeps u64 alignment, captures caller registers,
 * fills the record exactly like the ftrace probe, and submits it to
 * the perf buffer with the captured addr/count/task.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}
703
/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
/* Type-check the perf probe against the tracepoint's signature. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
720
721#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
722#endif /* CONFIG_PERF_EVENTS */
 
 
723