/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;

enum auxtrace_error_type {
       PERF_AUXTRACE_ERROR_ITRACE  = 1,
       PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			thread_stack;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
};
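/*
 * Usage sketch (illustrative only, not part of this header): a tool that
 * accepts an --itrace option keeps one of these, lets itrace_parse_synth_opts()
 * (declared below) fill it in, and falls back to the defaults when the user
 * did not pass anything:
 *
 *	struct itrace_synth_opts synth_opts = { 0 };
 *
 *	if (!synth_opts.set)
 *		itrace_synth_opts__set_default(&synth_opts, false);
 *
 * The decoder then honours fields such as @instructions, @period and
 * @period_type when deciding which samples to synthesize.
 */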

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
};
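/*
 * Usage sketch (illustrative only): a decoder such as the Intel PT or CS-ETM
 * support embeds this callback table in its own session state and points the
 * session at it; the struct and function names below are hypothetical:
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		// ...decoder state...
 *	};
 *
 *	dec->auxtrace.process_event	     = my_decoder_process_event;
 *	dec->auxtrace.process_auxtrace_event = my_decoder_process_auxtrace_event;
 *	dec->auxtrace.flush_events	     = my_decoder_flush;
 *	dec->auxtrace.free_events	     = my_decoder_free_events;
 *	dec->auxtrace.free		     = my_decoder_free;
 *	session->auxtrace		     = &dec->auxtrace;
 *
 * perf_session then dispatches events to these callbacks through the
 * auxtrace__*() wrappers declared later in this header.
 */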

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};
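/*
 * Usage sketch (illustrative only): decoders keep one auxtrace_queue per cpu
 * or thread and use the heap to pick, by timestamp, which queue to decode
 * next.  The element with the lowest ordinal sits at heap_array[0]:
 *
 *	err = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	if (err)
 *		return err;
 *	...
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		decode_queue(queue_nr);	// hypothetical helper
 *		// re-add the queue with its next timestamp if more data remains
 *	}
 */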

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
};
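/*
 * Usage sketch (illustrative only): each architecture that supports AUX area
 * recording supplies these callbacks from its auxtrace_record__init()
 * implementation (the declaration appears below).  A minimal recorder, with
 * hypothetical names, might look like:
 *
 *	static struct my_pmu_recording {
 *		struct auxtrace_record itr;
 *	} rec;
 *
 *	struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
 *						      int *err)
 *	{
 *		rec.itr.recording_options = my_pmu_recording_options;
 *		rec.itr.info_priv_size	  = my_pmu_info_priv_size;
 *		rec.itr.info_fill	  = my_pmu_info_fill;
 *		rec.itr.free		  = my_pmu_recording_free;
 *		rec.itr.alignment	  = 0;
 *		*err = 0;
 *		return &rec.itr;
 *	}
 */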

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
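/*
 * Usage sketch (illustrative only): a consumer of the AUX ring buffer reads
 * the head, copies everything between the old and new head out of the mmap,
 * and only then advances the tail so the kernel can reuse the space:
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old  = mm->prev;
 *
 *	if (head != old) {
 *		copy_out(mm->base, mm->mask, old, head);  // hypothetical
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head);
 *	}
 *
 * The auxtrace_mmap__read*() helpers declared below implement this pattern,
 * including the wrap-around handling the sketch above ignores.
 */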

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);
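/*
 * Usage sketch (illustrative only): setting up the AUX area mapping for one
 * ring buffer.  The offset, page count and file descriptor are placeholders
 * supplied by the caller:
 *
 *	struct auxtrace_mmap_params mp;
 *	struct auxtrace_mmap mm;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, output_fd))
 *		return -1;
 *	...
 *	auxtrace_mmap__munmap(&mm);
 */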

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
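/*
 * Usage sketch (illustrative only): walking the buffers of one queue.  Buffer
 * data is loaded lazily, so get the data (reading it from the perf.data file
 * if necessary), decode it, then drop it again to keep memory usage bounded:
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		decode(data, buffer->size);	// hypothetical decoder
 *		auxtrace_buffer__put_data(buffer);
 *		auxtrace_buffer__drop_data(buffer);
 *	}
 */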

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
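/*
 * Usage sketch (illustrative only): the auxtrace cache is a fixed-size hash
 * that decoders use to memoize per-address results.  Entries embed
 * struct auxtrace_cache_entry as their first member; the entry type and the
 * chosen sizes below are hypothetical:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	struct auxtrace_cache *c;
 *	struct my_entry *e;
 *
 *	c = auxtrace_cache__new(10, sizeof(struct my_entry), 20);
 *	e = auxtrace_cache__alloc_entry(c);
 *	e->insn_cnt = insn_cnt;
 *	auxtrace_cache__add(c, key, &e->entry);
 *	...
 *	e = auxtrace_cache__lookup(c, key);
 */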

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

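/*
 * Usage sketch (illustrative only): parsing a bare address filter string into
 * a list of struct addr_filter and walking the result.  The filter string and
 * use_filter() helper are examples, not fixed API:
 *
 *	struct addr_filters filts;
 *	struct addr_filter *filt;
 *	int err;
 *
 *	addr_filters__init(&filts);
 *	err = addr_filters__parse_bare_filter(&filts, "filter main @ /bin/ls");
 *	if (!err)
 *		list_for_each_entry(filt, &filts.head, list)
 *			use_filter(filt);	// hypothetical
 *	addr_filters__exit(&filts);
 */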

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);

#define ITRACE_HELP \
"				i:	    		synthesize instructions events\n"		\
"				b:	    		synthesize branches events\n"		\
"				c:	    		synthesize branches events (calls only)\n"	\
"				r:	    		synthesize branches events (returns only)\n" \
"				x:	    		synthesize transactions events\n"		\
"				w:	    		synthesize ptwrite events\n"		\
"				p:	    		synthesize power events\n"			\
"				e:	    		synthesize error events\n"			\
"				d:	    		create a debug log\n"			\
"				g[len]:     		synthesize a call chain (use with i or x)\n" \
"				l[len]:     		synthesize last branch entries (use with i or x)\n" \
"				sNUMBER:    		skip initial number of events\n"		\
"				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
"				concatenate multiple options. Default is ibxwpe or cewp\n"
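/*
 * Example (illustrative only): "--itrace=i100usg16" requests an instructions
 * event every 100 microseconds with a call chain of up to 16 entries, while a
 * plain "--itrace" uses the default set shown above.  itrace_parse_synth_opts()
 * is the parser behind this option string.
 */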

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

#define perf_event__process_auxtrace_info		0
#define perf_event__process_auxtrace			0
#define perf_event__process_auxtrace_error		0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif