Linux Audio

Check our new training course

Loading...
v4.6
 
  1#
  2# Architectures that offer an FUNCTION_TRACER implementation should
  3#  select HAVE_FUNCTION_TRACER:
  4#
  5
  6config USER_STACKTRACE_SUPPORT
  7	bool
  8
  9config NOP_TRACER
 10	bool
 11
 12config HAVE_FTRACE_NMI_ENTER
 13	bool
 14	help
 15	  See Documentation/trace/ftrace-design.txt
 16
 17config HAVE_FUNCTION_TRACER
 18	bool
 19	help
 20	  See Documentation/trace/ftrace-design.txt
 21
 22config HAVE_FUNCTION_GRAPH_TRACER
 23	bool
 24	help
 25	  See Documentation/trace/ftrace-design.txt
 26
 27config HAVE_FUNCTION_GRAPH_FP_TEST
 28	bool
 29	help
 30	  See Documentation/trace/ftrace-design.txt
 31
 32config HAVE_DYNAMIC_FTRACE
 33	bool
 34	help
 35	  See Documentation/trace/ftrace-design.txt
 36
 37config HAVE_DYNAMIC_FTRACE_WITH_REGS
 38	bool
 39
 
 
 
 
 
 
 
 
 
 
 
 
 40config HAVE_FTRACE_MCOUNT_RECORD
 41	bool
 42	help
 43	  See Documentation/trace/ftrace-design.txt
 44
 45config HAVE_SYSCALL_TRACEPOINTS
 46	bool
 47	help
 48	  See Documentation/trace/ftrace-design.txt
 49
 50config HAVE_FENTRY
 51	bool
 52	help
 53	  Arch supports the gcc options -pg with -mfentry
 54
 
 
 
 
 
 
 
 
 
 
 55config HAVE_C_RECORDMCOUNT
 56	bool
 57	help
 58	  C version of recordmcount available?
 59
 60config TRACER_MAX_TRACE
 61	bool
 62
 63config TRACE_CLOCK
 64	bool
 65
 66config RING_BUFFER
 67	bool
 68	select TRACE_CLOCK
 69	select IRQ_WORK
 70
 71config FTRACE_NMI_ENTER
 72       bool
 73       depends on HAVE_FTRACE_NMI_ENTER
 74       default y
 75
 76config EVENT_TRACING
 77	select CONTEXT_SWITCH_TRACER
 
 78	bool
 79
 80config CONTEXT_SWITCH_TRACER
 81	bool
 82
 83config RING_BUFFER_ALLOW_SWAP
 84	bool
 85	help
 86	 Allow the use of ring_buffer_swap_cpu.
 87	 Adds a very slight overhead to tracing when enabled.
 88
 
 
 
 
 
 
 
 
 
 89# All tracer options should select GENERIC_TRACER. For those options that are
 90# enabled by all tracers (context switch and event tracer) they select TRACING.
 91# This allows those options to appear when no other tracer is selected. But the
 92# options do not appear when something else selects it. We need the two options
 93# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
 94# hiding of the automatic options.
 95
 96config TRACING
 97	bool
 98	select DEBUG_FS
 99	select RING_BUFFER
100	select STACKTRACE if STACKTRACE_SUPPORT
101	select TRACEPOINTS
102	select NOP_TRACER
103	select BINARY_PRINTF
104	select EVENT_TRACING
105	select TRACE_CLOCK
106
107config GENERIC_TRACER
108	bool
109	select TRACING
110
111#
112# Minimum requirements an architecture has to meet for us to
113# be able to offer generic tracing facilities:
114#
115config TRACING_SUPPORT
116	bool
117	# PPC32 has no irqflags tracing support, but it can use most of the
118	# tracers anyway, they were tested to build and work. Note that new
119	# exceptions to this list aren't welcomed, better implement the
120	# irqflags tracing for your architecture.
121	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
122	depends on STACKTRACE_SUPPORT
123	default y
124
125if TRACING_SUPPORT
126
127menuconfig FTRACE
128	bool "Tracers"
129	default y if DEBUG_KERNEL
130	help
131	  Enable the kernel tracing infrastructure.
132
133if FTRACE
134
 
 
 
 
 
 
 
 
 
135config FUNCTION_TRACER
136	bool "Kernel Function Tracer"
137	depends on HAVE_FUNCTION_TRACER
138	select KALLSYMS
139	select GENERIC_TRACER
140	select CONTEXT_SWITCH_TRACER
 
 
 
141	help
142	  Enable the kernel to trace every kernel function. This is done
143	  by using a compiler feature to insert a small, 5-byte No-Operation
144	  instruction at the beginning of every kernel function, which NOP
145	  sequence is then dynamically patched into a tracer call when
146	  tracing is enabled by the administrator. If it's runtime disabled
147	  (the bootup default), then the overhead of the instructions is very
148	  small and not measurable even in micro-benchmarks.
149
150config FUNCTION_GRAPH_TRACER
151	bool "Kernel Function Graph Tracer"
152	depends on HAVE_FUNCTION_GRAPH_TRACER
153	depends on FUNCTION_TRACER
154	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
155	default y
156	help
157	  Enable the kernel to trace a function at both its return
158	  and its entry.
159	  Its first purpose is to trace the duration of functions and
160	  draw a call graph for each thread with some information like
161	  the return value. This is done by setting the current return
162	  address on the current task structure into a stack of calls.
163
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
165config IRQSOFF_TRACER
166	bool "Interrupts-off Latency Tracer"
167	default n
168	depends on TRACE_IRQFLAGS_SUPPORT
169	depends on !ARCH_USES_GETTIMEOFFSET
170	select TRACE_IRQFLAGS
171	select GENERIC_TRACER
172	select TRACER_MAX_TRACE
173	select RING_BUFFER_ALLOW_SWAP
174	select TRACER_SNAPSHOT
175	select TRACER_SNAPSHOT_PER_CPU_SWAP
176	help
177	  This option measures the time spent in irqs-off critical
178	  sections, with microsecond accuracy.
179
180	  The default measurement method is a maximum search, which is
181	  disabled by default and can be runtime (re-)started
182	  via:
183
184	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
185
186	  (Note that kernel size and overhead increase with this option
187	  enabled. This option and the preempt-off timing option can be
188	  used together or separately.)
189
190config PREEMPT_TRACER
191	bool "Preemption-off Latency Tracer"
192	default n
193	depends on !ARCH_USES_GETTIMEOFFSET
194	depends on PREEMPT
195	select GENERIC_TRACER
196	select TRACER_MAX_TRACE
197	select RING_BUFFER_ALLOW_SWAP
198	select TRACER_SNAPSHOT
199	select TRACER_SNAPSHOT_PER_CPU_SWAP
 
200	help
201	  This option measures the time spent in preemption-off critical
202	  sections, with microsecond accuracy.
203
204	  The default measurement method is a maximum search, which is
205	  disabled by default and can be runtime (re-)started
206	  via:
207
208	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
209
210	  (Note that kernel size and overhead increase with this option
211	  enabled. This option and the irqs-off timing option can be
212	  used together or separately.)
213
214config SCHED_TRACER
215	bool "Scheduling Latency Tracer"
216	select GENERIC_TRACER
217	select CONTEXT_SWITCH_TRACER
218	select TRACER_MAX_TRACE
219	select TRACER_SNAPSHOT
220	help
221	  This tracer tracks the latency of the highest priority task
222	  to be scheduled in, starting from the point it has woken up.
223
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224config ENABLE_DEFAULT_TRACERS
225	bool "Trace process context switches and events"
226	depends on !GENERIC_TRACER
227	select TRACING
228	help
229	  This tracer hooks to various trace points in the kernel,
230	  allowing the user to pick and choose which trace point they
231	  want to trace. It also includes the sched_switch tracer plugin.
232
233config FTRACE_SYSCALLS
234	bool "Trace syscalls"
235	depends on HAVE_SYSCALL_TRACEPOINTS
236	select GENERIC_TRACER
237	select KALLSYMS
238	help
239	  Basic tracer to catch the syscall entry and exit events.
240
241config TRACER_SNAPSHOT
242	bool "Create a snapshot trace buffer"
243	select TRACER_MAX_TRACE
244	help
245	  Allow tracing users to take snapshot of the current buffer using the
246	  ftrace interface, e.g.:
247
248	      echo 1 > /sys/kernel/debug/tracing/snapshot
249	      cat snapshot
250
251config TRACER_SNAPSHOT_PER_CPU_SWAP
252        bool "Allow snapshot to swap per CPU"
253	depends on TRACER_SNAPSHOT
254	select RING_BUFFER_ALLOW_SWAP
255	help
256	  Allow doing a snapshot of a single CPU buffer instead of a
257	  full swap (all buffers). If this is set, then the following is
258	  allowed:
259
260	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
261
262	  After which, only the tracing buffer for CPU 2 was swapped with
263	  the main tracing buffer, and the other CPU buffers remain the same.
264
265	  When this is enabled, this adds a little more overhead to the
266	  trace recording, as it needs to add some checks to synchronize
267	  recording with swaps. But this does not affect the performance
268	  of the overall system. This is enabled by default when the preempt
269	  or irq latency tracers are enabled, as those need to swap as well
270	  and already adds the overhead (plus a lot more).
271
272config TRACE_BRANCH_PROFILING
273	bool
274	select GENERIC_TRACER
275
276choice
277	prompt "Branch Profiling"
278	default BRANCH_PROFILE_NONE
279	help
280	 The branch profiling is a software profiler. It will add hooks
281	 into the C conditionals to test which path a branch takes.
282
283	 The likely/unlikely profiler only looks at the conditions that
284	 are annotated with a likely or unlikely macro.
285
286	 The "all branch" profiler will profile every if-statement in the
287	 kernel. This profiler will also enable the likely/unlikely
288	 profiler.
289
290	 Either of the above profilers adds a bit of overhead to the system.
291	 If unsure, choose "No branch profiling".
292
293config BRANCH_PROFILE_NONE
294	bool "No branch profiling"
295	help
296	  No branch profiling. Branch profiling adds a bit of overhead.
297	  Only enable it if you want to analyse the branching behavior.
298	  Otherwise keep it disabled.
299
300config PROFILE_ANNOTATED_BRANCHES
301	bool "Trace likely/unlikely profiler"
302	select TRACE_BRANCH_PROFILING
303	help
304	  This tracer profiles all likely and unlikely macros
305	  in the kernel. It will display the results in:
306
307	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
308
309	  Note: this will add a significant overhead; only turn this
310	  on if you need to profile the system's use of these macros.
311
312config PROFILE_ALL_BRANCHES
313	bool "Profile all if conditionals"
314	select TRACE_BRANCH_PROFILING
315	help
316	  This tracer profiles all branch conditions. Every if ()
317	  taken in the kernel is recorded whether it hit or miss.
318	  The results will be displayed in:
319
320	  /sys/kernel/debug/tracing/trace_stat/branch_all
321
322	  This option also enables the likely/unlikely profiler.
323
324	  This configuration, when enabled, will impose a great overhead
325	  on the system. This should only be enabled when the system
326	  is to be analyzed in much detail.
327endchoice
328
329config TRACING_BRANCHES
330	bool
331	help
332	  Selected by tracers that will trace the likely and unlikely
333	  conditions. This prevents the tracers themselves from being
334	  profiled. Profiling the tracing infrastructure can only happen
335	  when the likelys and unlikelys are not being traced.
336
337config BRANCH_TRACER
338	bool "Trace likely/unlikely instances"
339	depends on TRACE_BRANCH_PROFILING
340	select TRACING_BRANCHES
341	help
342	  This traces the events of likely and unlikely condition
343	  calls in the kernel.  The difference between this and the
344	  "Trace likely/unlikely profiler" is that this is not a
345	  histogram of the callers, but actually places the calling
346	  events into a running trace buffer to see when and where the
347	  events happened, as well as their results.
348
349	  Say N if unsure.
350
351config STACK_TRACER
352	bool "Trace max stack"
353	depends on HAVE_FUNCTION_TRACER
354	select FUNCTION_TRACER
355	select STACKTRACE
356	select KALLSYMS
357	help
358	  This special tracer records the maximum stack footprint of the
359	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
360
361	  This tracer works by hooking into every function call that the
362	  kernel executes, and keeping a maximum stack depth value and
363	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
364	  then it will not have any overhead while the stack tracer
365	  is disabled.
366
367	  To enable the stack tracer on bootup, pass in 'stacktrace'
368	  on the kernel command line.
369
370	  The stack tracer can also be enabled or disabled via the
371	  sysctl kernel.stack_tracer_enabled
372
373	  Say N if unsure.
374
375config BLK_DEV_IO_TRACE
376	bool "Support for tracing block IO actions"
377	depends on SYSFS
378	depends on BLOCK
379	select RELAY
380	select DEBUG_FS
381	select TRACEPOINTS
382	select GENERIC_TRACER
383	select STACKTRACE
384	help
385	  Say Y here if you want to be able to trace the block layer actions
386	  on a given queue. Tracing allows you to see any traffic happening
387	  on a block device queue. For more information (and the userspace
388	  support tools needed), fetch the blktrace tools from:
389
390	  git://git.kernel.dk/blktrace.git
391
392	  Tracing also is possible using the ftrace interface, e.g.:
393
394	    echo 1 > /sys/block/sda/sda1/trace/enable
395	    echo blk > /sys/kernel/debug/tracing/current_tracer
396	    cat /sys/kernel/debug/tracing/trace_pipe
397
398	  If unsure, say N.
399
400config KPROBE_EVENT
401	depends on KPROBES
402	depends on HAVE_REGS_AND_STACK_ACCESS_API
403	bool "Enable kprobes-based dynamic events"
404	select TRACING
405	select PROBE_EVENTS
 
406	default y
407	help
408	  This allows the user to add tracing events (similar to tracepoints)
409	  on the fly via the ftrace interface. See
410	  Documentation/trace/kprobetrace.txt for more details.
411
412	  Those events can be inserted wherever kprobes can probe, and record
413	  various register and memory values.
414
415	  This option is also required by perf-probe subcommand of perf tools.
416	  If you want to use perf tools, this option is strongly recommended.
417
418config UPROBE_EVENT
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419	bool "Enable uprobes-based dynamic events"
420	depends on ARCH_SUPPORTS_UPROBES
421	depends on MMU
422	depends on PERF_EVENTS
423	select UPROBES
424	select PROBE_EVENTS
 
425	select TRACING
426	default n
427	help
428	  This allows the user to add tracing events on top of userspace
429	  dynamic events (similar to tracepoints) on the fly via the trace
430	  events interface. Those events can be inserted wherever uprobes
431	  can probe, and record various registers.
432	  This option is required if you plan to use perf-probe subcommand
433	  of perf tools on user space applications.
434
435config BPF_EVENTS
436	depends on BPF_SYSCALL
437	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
438	bool
439	default y
440	help
441	  This allows the user to attach BPF programs to kprobe events.
 
442
443config PROBE_EVENTS
444	def_bool n
445
446config DYNAMIC_FTRACE
447	bool "enable/disable function tracing dynamically"
448	depends on FUNCTION_TRACER
449	depends on HAVE_DYNAMIC_FTRACE
450	default y
451	help
452	  This option will modify all the calls to function tracing
453	  dynamically (will patch them out of the binary image and
454	  replace them with a No-Op instruction) on boot up. During
455	  compile time, a table is made of all the locations that ftrace
456	  can function trace, and this table is linked into the kernel
457	  image. When this is enabled, functions can be individually
458	  enabled, and the functions not enabled will not affect
459	  performance of the system.
460
461	  See the files in /sys/kernel/debug/tracing:
462	    available_filter_functions
463	    set_ftrace_filter
464	    set_ftrace_notrace
465
466	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
467	  otherwise has native performance as long as no tracing is active.
468
469config DYNAMIC_FTRACE_WITH_REGS
470	def_bool y
471	depends on DYNAMIC_FTRACE
472	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
473
474config FUNCTION_PROFILER
475	bool "Kernel function profiler"
476	depends on FUNCTION_TRACER
 
477	default n
478	help
479	  This option enables the kernel function profiler. A file is created
480	  in debugfs called function_profile_enabled which defaults to zero.
481	  When a 1 is echoed into this file profiling begins, and when a
482	  zero is entered, profiling stops. A "functions" file is created in
 483	  the trace_stat directory; this file shows the list of functions that
484	  have been hit and their counters.
485
486	  If in doubt, say N.
487
488config FTRACE_MCOUNT_RECORD
489	def_bool y
490	depends on DYNAMIC_FTRACE
491	depends on HAVE_FTRACE_MCOUNT_RECORD
492
493config FTRACE_SELFTEST
494	bool
 
495
496config FTRACE_STARTUP_TEST
497	bool "Perform a startup test on ftrace"
498	depends on GENERIC_TRACER
499	select FTRACE_SELFTEST
500	help
501	  This option performs a series of startup tests on ftrace. On bootup
502	  a series of tests are made to verify that the tracer is
503	  functioning properly. It will do tests on all the configured
504	  tracers of ftrace.
505
506config EVENT_TRACE_TEST_SYSCALLS
507	bool "Run selftest on syscall events"
508	depends on FTRACE_STARTUP_TEST
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
509	help
510	 This option will also enable testing every syscall event.
511	 It only enables the event and disables it and runs various loads
512	 with the event enabled. This adds a bit more time for kernel boot
513	 up since it runs this on every system call defined.
 
514
515	 TBD - enable a way to actually call the syscalls as we test their
516	       events
517
518config MMIOTRACE
519	bool "Memory mapped IO tracing"
520	depends on HAVE_MMIOTRACE_SUPPORT && PCI
521	select GENERIC_TRACER
 
 
 
 
 
 
522	help
523	  Mmiotrace traces Memory Mapped I/O access and is meant for
524	  debugging and reverse engineering. It is called from the ioremap
525	  implementation and works via page faults. Tracing is disabled by
526	  default and can be enabled at run-time.
 
 
527
528	  See Documentation/trace/mmiotrace.txt.
529	  If you are not helping to develop drivers, say N.
530
531config MMIOTRACE_TEST
532	tristate "Test module for mmiotrace"
533	depends on MMIOTRACE && m
 
 
 
534	help
535	  This is a dumb module for testing mmiotrace. It is very dangerous
536	  as it will write garbage to IO memory starting at a given address.
537	  However, it should be safe to use on e.g. unused portion of VRAM.
538
539	  Say N, unless you absolutely know what you are doing.
540
541config TRACEPOINT_BENCHMARK
542        bool "Add tracepoint that benchmarks tracepoints"
543	help
544	 This option creates the tracepoint "benchmark:benchmark_event".
545	 When the tracepoint is enabled, it kicks off a kernel thread that
 546	 goes into an infinite loop (calling cond_resched() to let other tasks
547	 run), and calls the tracepoint. Each iteration will record the time
548	 it took to write to the tracepoint and the next iteration that
549	 data will be passed to the tracepoint itself. That is, the tracepoint
550	 will report the time it took to do the previous tracepoint.
551	 The string written to the tracepoint is a static string of 128 bytes
552	 to keep the time the same. The initial string is simply a write of
553	 "START". The second string records the cold cache time of the first
554	 write which is not added to the rest of the calculations.
555
556	 As it is a tight loop, it benchmarks as hot cache. That's fine because
557	 we care most about hot paths that are probably in cache already.
558
559	 An example of the output:
560
561	      START
562	      first=3672 [COLD CACHED]
563	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
564	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
565	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
566	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
567	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
568	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
569
570
571config RING_BUFFER_BENCHMARK
572	tristate "Ring buffer benchmark stress tester"
573	depends on RING_BUFFER
574	help
575	  This option creates a test to stress the ring buffer and benchmark it.
576	  It creates its own ring buffer such that it will not interfere with
577	  any other users of the ring buffer (such as ftrace). It then creates
578	  a producer and consumer that will run for 10 seconds and sleep for
579	  10 seconds. Each interval it will print out the number of events
580	  it recorded and give a rough estimate of how long each iteration took.
581
582	  It does not disable interrupts or raise its priority, so it may be
583	  affected by processes that are running.
584
585	  If unsure, say N.
586
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587config RING_BUFFER_STARTUP_TEST
588       bool "Ring buffer startup self test"
589       depends on RING_BUFFER
590       help
591         Run a simple self test on the ring buffer on boot up. Late in the
 592	 kernel boot sequence, the test will start, kicking off
593	 a thread per cpu. Each thread will write various size events
594	 into the ring buffer. Another thread is created to send IPIs
595	 to each of the threads, where the IPI handler will also write
596	 to the ring buffer, to test/stress the nesting ability.
597	 If any anomalies are discovered, a warning will be displayed
598	 and all ring buffers will be disabled.
599
600	 The test runs for 10 seconds. This will slow your boot time
601	 by at least 10 more seconds.
602
 603	 At the end of the test, statistics and more checks are done.
604	 It will output the stats of each per cpu buffer. What
605	 was written, the sizes, what was read, what was lost, and
606	 other similar details.
607
608	 If unsure, say N
609
610config TRACE_ENUM_MAP_FILE
611       bool "Show enum mappings for trace events"
612       depends on TRACING
613       help
614        The "print fmt" of the trace events will show the enum names instead
615	of their values. This can cause problems for user space tools that
616	use this string to parse the raw data as user space does not know
617	how to convert the string to its value.
 
 
 
 
 
 
 
 
618
619	To fix this, there's a special macro in the kernel that can be used
620	to convert the enum into its value. If this macro is used, then the
621	print fmt strings will have the enums converted to their values.
622
623	If something does not get converted properly, this option can be
624	used to show what enums the kernel tried to convert.
 
 
 
 
 
625
626	This option is for debugging the enum conversions. A file is created
627	in the tracing directory called "enum_map" that will show the enum
628	names matched with their values and what trace event system they
 629	 belong to.
630
631	Normally, the mapping of the strings to values will be freed after
632	boot up or module load. With this option, they will not be freed, as
633	they are needed for the "enum_map" file. Enabling this option will
634	increase the memory footprint of the running kernel.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
635
636	If unsure, say N
 
637
638config TRACING_EVENTS_GPIO
639	bool "Trace gpio events"
640	depends on GPIOLIB
641	default y
 
642	help
643	  Enable tracing events for gpio subsystem
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
644
645endif # FTRACE
646
647endif # TRACING_SUPPORT
648
v5.14.15
   1# SPDX-License-Identifier: GPL-2.0-only
   2#
   3# Architectures that offer a FUNCTION_TRACER implementation should
   4#  select HAVE_FUNCTION_TRACER:
   5#
   6
   7config USER_STACKTRACE_SUPPORT
   8	bool
   9
  10config NOP_TRACER
  11	bool
  12
 
 
 
 
 
  13config HAVE_FUNCTION_TRACER
  14	bool
  15	help
  16	  See Documentation/trace/ftrace-design.rst
  17
  18config HAVE_FUNCTION_GRAPH_TRACER
  19	bool
  20	help
  21	  See Documentation/trace/ftrace-design.rst
 
 
 
 
 
  22
  23config HAVE_DYNAMIC_FTRACE
  24	bool
  25	help
  26	  See Documentation/trace/ftrace-design.rst
  27
  28config HAVE_DYNAMIC_FTRACE_WITH_REGS
  29	bool
  30
  31config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
  32	bool
  33
  34config HAVE_DYNAMIC_FTRACE_WITH_ARGS
  35	bool
  36	help
  37	 If this is set, then arguments and stack can be found from
  38	 the pt_regs passed into the function callback regs parameter
  39	 by default, even without setting the REGS flag in the ftrace_ops.
  40	 This allows for use of regs_get_kernel_argument() and
  41	 kernel_stack_pointer().
  42
  43config HAVE_FTRACE_MCOUNT_RECORD
  44	bool
  45	help
  46	  See Documentation/trace/ftrace-design.rst
  47
  48config HAVE_SYSCALL_TRACEPOINTS
  49	bool
  50	help
  51	  See Documentation/trace/ftrace-design.rst
  52
  53config HAVE_FENTRY
  54	bool
  55	help
  56	  Arch supports the gcc options -pg with -mfentry
  57
  58config HAVE_NOP_MCOUNT
  59	bool
  60	help
  61	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount
  62
  63config HAVE_OBJTOOL_MCOUNT
  64	bool
  65	help
  66	  Arch supports objtool --mcount
  67
  68config HAVE_C_RECORDMCOUNT
  69	bool
  70	help
  71	  C version of recordmcount available?
  72
  73config TRACER_MAX_TRACE
  74	bool
  75
  76config TRACE_CLOCK
  77	bool
  78
  79config RING_BUFFER
  80	bool
  81	select TRACE_CLOCK
  82	select IRQ_WORK
  83
 
 
 
 
 
  84config EVENT_TRACING
  85	select CONTEXT_SWITCH_TRACER
  86	select GLOB
  87	bool
  88
  89config CONTEXT_SWITCH_TRACER
  90	bool
  91
  92config RING_BUFFER_ALLOW_SWAP
  93	bool
  94	help
  95	 Allow the use of ring_buffer_swap_cpu.
  96	 Adds a very slight overhead to tracing when enabled.
  97
  98config PREEMPTIRQ_TRACEPOINTS
  99	bool
 100	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
 101	select TRACING
 102	default y
 103	help
 104	  Create preempt/irq toggle tracepoints if needed, so that other parts
 105	  of the kernel can use them to generate or add hooks to them.
 106
 107# All tracer options should select GENERIC_TRACER. For those options that are
 108# enabled by all tracers (context switch and event tracer) they select TRACING.
 109# This allows those options to appear when no other tracer is selected. But the
 110# options do not appear when something else selects it. We need the two options
 111# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
 112# hiding of the automatic options.
 113
 114config TRACING
 115	bool
 
 116	select RING_BUFFER
 117	select STACKTRACE if STACKTRACE_SUPPORT
 118	select TRACEPOINTS
 119	select NOP_TRACER
 120	select BINARY_PRINTF
 121	select EVENT_TRACING
 122	select TRACE_CLOCK
 123
 124config GENERIC_TRACER
 125	bool
 126	select TRACING
 127
 128#
 129# Minimum requirements an architecture has to meet for us to
 130# be able to offer generic tracing facilities:
 131#
 132config TRACING_SUPPORT
 133	bool
 134	depends on TRACE_IRQFLAGS_SUPPORT
 
 
 
 
 135	depends on STACKTRACE_SUPPORT
 136	default y
 137
 138if TRACING_SUPPORT
 139
 140menuconfig FTRACE
 141	bool "Tracers"
 142	default y if DEBUG_KERNEL
 143	help
 144	  Enable the kernel tracing infrastructure.
 145
 146if FTRACE
 147
 148config BOOTTIME_TRACING
 149	bool "Boot-time Tracing support"
 150	depends on TRACING
 151	select BOOT_CONFIG
 152	help
 153	  Enable developer to setup ftrace subsystem via supplemental
 154	  kernel cmdline at boot time for debugging (tracing) driver
 155	  initialization and boot process.
 156
 157config FUNCTION_TRACER
 158	bool "Kernel Function Tracer"
 159	depends on HAVE_FUNCTION_TRACER
 160	select KALLSYMS
 161	select GENERIC_TRACER
 162	select CONTEXT_SWITCH_TRACER
 163	select GLOB
 164	select TASKS_RCU if PREEMPTION
 165	select TASKS_RUDE_RCU
 166	help
 167	  Enable the kernel to trace every kernel function. This is done
 168	  by using a compiler feature to insert a small, 5-byte No-Operation
 169	  instruction at the beginning of every kernel function, which NOP
 170	  sequence is then dynamically patched into a tracer call when
 171	  tracing is enabled by the administrator. If it's runtime disabled
 172	  (the bootup default), then the overhead of the instructions is very
 173	  small and not measurable even in micro-benchmarks.
 174
 175config FUNCTION_GRAPH_TRACER
 176	bool "Kernel Function Graph Tracer"
 177	depends on HAVE_FUNCTION_GRAPH_TRACER
 178	depends on FUNCTION_TRACER
 179	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
 180	default y
 181	help
 182	  Enable the kernel to trace a function at both its return
 183	  and its entry.
 184	  Its first purpose is to trace the duration of functions and
 185	  draw a call graph for each thread with some information like
 186	  the return value. This is done by setting the current return
 187	  address on the current task structure into a stack of calls.
 188
 189config DYNAMIC_FTRACE
 190	bool "enable/disable function tracing dynamically"
 191	depends on FUNCTION_TRACER
 192	depends on HAVE_DYNAMIC_FTRACE
 193	default y
 194	help
 195	  This option will modify all the calls to function tracing
 196	  dynamically (will patch them out of the binary image and
 197	  replace them with a No-Op instruction) on boot up. During
 198	  compile time, a table is made of all the locations that ftrace
 199	  can function trace, and this table is linked into the kernel
 200	  image. When this is enabled, functions can be individually
 201	  enabled, and the functions not enabled will not affect
 202	  performance of the system.
 203
 204	  See the files in /sys/kernel/debug/tracing:
 205	    available_filter_functions
 206	    set_ftrace_filter
 207	    set_ftrace_notrace
 208
 209	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 210	  otherwise has native performance as long as no tracing is active.
 211
 212config DYNAMIC_FTRACE_WITH_REGS
 213	def_bool y
 214	depends on DYNAMIC_FTRACE
 215	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
 216
 217config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 218	def_bool y
 219	depends on DYNAMIC_FTRACE_WITH_REGS
 220	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 221
 222config DYNAMIC_FTRACE_WITH_ARGS
 223	def_bool y
 224	depends on DYNAMIC_FTRACE
 225	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
 226
 227config FUNCTION_PROFILER
 228	bool "Kernel function profiler"
 229	depends on FUNCTION_TRACER
 230	default n
 231	help
 232	  This option enables the kernel function profiler. A file is created
 233	  in debugfs called function_profile_enabled which defaults to zero.
 234	  When a 1 is echoed into this file profiling begins, and when a
 235	  zero is entered, profiling stops. A "functions" file is created in
 236	  the trace_stat directory; this file shows the list of functions that
 237	  have been hit and their counters.
 238
 239	  If in doubt, say N.
 240
 241config STACK_TRACER
 242	bool "Trace max stack"
 243	depends on HAVE_FUNCTION_TRACER
 244	select FUNCTION_TRACER
 245	select STACKTRACE
 246	select KALLSYMS
 247	help
 248	  This special tracer records the maximum stack footprint of the
 249	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
 250
 251	  This tracer works by hooking into every function call that the
 252	  kernel executes, and keeping a maximum stack depth value and
 253	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
 254	  then it will not have any overhead while the stack tracer
 255	  is disabled.
 256
 257	  To enable the stack tracer on bootup, pass in 'stacktrace'
 258	  on the kernel command line.
 259
 260	  The stack tracer can also be enabled or disabled via the
 261	  sysctl kernel.stack_tracer_enabled
 262
 263	  Say N if unsure.
 264
 265config TRACE_PREEMPT_TOGGLE
 266	bool
 267	help
 268	  Enables hooks which will be called when preemption is first disabled,
 269	  and last enabled.
 270
 271config IRQSOFF_TRACER
 272	bool "Interrupts-off Latency Tracer"
 273	default n
 274	depends on TRACE_IRQFLAGS_SUPPORT
 
 275	select TRACE_IRQFLAGS
 276	select GENERIC_TRACER
 277	select TRACER_MAX_TRACE
 278	select RING_BUFFER_ALLOW_SWAP
 279	select TRACER_SNAPSHOT
 280	select TRACER_SNAPSHOT_PER_CPU_SWAP
 281	help
 282	  This option measures the time spent in irqs-off critical
 283	  sections, with microsecond accuracy.
 284
 285	  The default measurement method is a maximum search, which is
 286	  disabled by default and can be runtime (re-)started
 287	  via:
 288
 289	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 290
 291	  (Note that kernel size and overhead increase with this option
 292	  enabled. This option and the preempt-off timing option can be
 293	  used together or separately.)
 294
 295config PREEMPT_TRACER
 296	bool "Preemption-off Latency Tracer"
 297	default n
 298	depends on PREEMPTION
 
 299	select GENERIC_TRACER
 300	select TRACER_MAX_TRACE
 301	select RING_BUFFER_ALLOW_SWAP
 302	select TRACER_SNAPSHOT
 303	select TRACER_SNAPSHOT_PER_CPU_SWAP
 304	select TRACE_PREEMPT_TOGGLE
 305	help
 306	  This option measures the time spent in preemption-off critical
 307	  sections, with microsecond accuracy.
 308
 309	  The default measurement method is a maximum search, which is
 310	  disabled by default and can be runtime (re-)started
 311	  via:
 312
 313	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 314
 315	  (Note that kernel size and overhead increase with this option
 316	  enabled. This option and the irqs-off timing option can be
 317	  used together or separately.)
 318
 319config SCHED_TRACER
 320	bool "Scheduling Latency Tracer"
 321	select GENERIC_TRACER
 322	select CONTEXT_SWITCH_TRACER
 323	select TRACER_MAX_TRACE
 324	select TRACER_SNAPSHOT
 325	help
 326	  This tracer tracks the latency of the highest priority task
 327	  to be scheduled in, starting from the point it has woken up.
 328
 329config HWLAT_TRACER
 330	bool "Tracer to detect hardware latencies (like SMIs)"
 331	select GENERIC_TRACER
 332	help
 333	 This tracer, when enabled will create one or more kernel threads,
 334	 depending on what the cpumask file is set to, with each thread
 335	 spinning in a loop looking for interruptions caused by
 336	 something other than the kernel. For example, if a
 337	 System Management Interrupt (SMI) takes a noticeable amount of
 338	 time, this tracer will detect it. This is useful for testing
 339	 if a system is reliable for Real Time tasks.
 340
 341	 Some files are created in the tracing directory when this
 342	 is enabled:
 343
 344	   hwlat_detector/width   - time in usecs for how long to spin for
 345	   hwlat_detector/window  - time in usecs between the start of each
 346				     iteration
 347
 348	 A kernel thread is created that will spin with interrupts disabled
 349	 for "width" microseconds in every "window" cycle. It will not spin
 350	 for "window - width" microseconds, where the system can
 351	 continue to operate.
 352
 353	 The output will appear in the trace and trace_pipe files.
 354
 355	 When the tracer is not running, it has no effect on the system,
 356	 but when it is running, it can cause the system to be
 357	 periodically non responsive. Do not run this tracer on a
 358	 production system.
 359
 360	 To enable this tracer, echo in "hwlat" into the current_tracer
 361	 file. Every time a latency is greater than tracing_thresh, it will
 362	 be recorded into the ring buffer.
 363
 364config OSNOISE_TRACER
 365	bool "OS Noise tracer"
 366	select GENERIC_TRACER
 367	help
 368	  In the context of high-performance computing (HPC), the Operating
 369	  System Noise (osnoise) refers to the interference experienced by an
 370	  application due to activities inside the operating system. In the
 371	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
 372	  can cause noise to the system. Moreover, hardware-related jobs can
 373	  also cause noise, for example, via SMIs.
 374
 375	  The osnoise tracer leverages the hwlat_detector by running a similar
 376	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
 377	  the sources of osnoise during its execution. The osnoise tracer takes
 378	  note of the entry and exit point of any source of interferences,
 379	  increasing a per-cpu interference counter. It saves an interference
 380	  counter for each source of interference. The interference counter for
 381	  NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
 382	  observes these interferences' entry events. When a noise happens
 383	  without any interference from the operating system level, the
 384	  hardware noise counter increases, pointing to a hardware-related
 385	  noise. In this way, osnoise can account for any source of
 386	  interference. At the end of the period, the osnoise tracer prints
 387	  the sum of all noise, the max single noise, the percentage of CPU
 388	  available for the thread, and the counters for the noise sources.
 389
 390	  In addition to the tracer, a set of tracepoints were added to
 391	  facilitate the identification of the osnoise source.
 392
 393	  The output will appear in the trace and trace_pipe files.
 394
 395	  To enable this tracer, echo in "osnoise" into the current_tracer
 396	  file.
 397
 398config TIMERLAT_TRACER
 399	bool "Timerlat tracer"
 400	select OSNOISE_TRACER
 401	select GENERIC_TRACER
 402	help
 403	  The timerlat tracer aims to help the preemptive kernel developers
 404	  to find sources of wakeup latencies of real-time threads.
 405
 406	  The tracer creates a per-cpu kernel thread with real-time priority.
 407	  The tracer thread sets a periodic timer to wakeup itself, and goes
 408	  to sleep waiting for the timer to fire. At the wakeup, the thread
 409	  then computes a wakeup latency value as the difference between
 410	  the current time and the absolute time that the timer was set
 411	  to expire.
 412
 413	  The tracer prints two lines at every activation. The first is the
 414	  timer latency observed at the hardirq context before the
 415	  activation of the thread. The second is the timer latency observed
 416	  by the thread, which is the same level that cyclictest reports. The
 417	  ACTIVATION ID field serves to relate the irq execution to its
 418	  respective thread execution.
 419
 420	  The tracer is built on top of the osnoise tracer, and the osnoise:
 421	  events can be used to trace the source of interference from NMI,
 422	  IRQs and other threads. It also enables the capture of the
 423	  stacktrace at the IRQ context, which helps to identify the code
 424	  path that can cause thread delay.
 425
 426config MMIOTRACE
 427	bool "Memory mapped IO tracing"
 428	depends on HAVE_MMIOTRACE_SUPPORT && PCI
 429	select GENERIC_TRACER
 430	help
 431	  Mmiotrace traces Memory Mapped I/O access and is meant for
 432	  debugging and reverse engineering. It is called from the ioremap
 433	  implementation and works via page faults. Tracing is disabled by
 434	  default and can be enabled at run-time.
 435
 436	  See Documentation/trace/mmiotrace.rst.
 437	  If you are not helping to develop drivers, say N.
 438
 439config ENABLE_DEFAULT_TRACERS
 440	bool "Trace process context switches and events"
 441	depends on !GENERIC_TRACER
 442	select TRACING
 443	help
 444	  This tracer hooks to various trace points in the kernel,
 445	  allowing the user to pick and choose which trace point they
 446	  want to trace. It also includes the sched_switch tracer plugin.
 447
 448config FTRACE_SYSCALLS
 449	bool "Trace syscalls"
 450	depends on HAVE_SYSCALL_TRACEPOINTS
 451	select GENERIC_TRACER
 452	select KALLSYMS
 453	help
 454	  Basic tracer to catch the syscall entry and exit events.
 455
 456config TRACER_SNAPSHOT
 457	bool "Create a snapshot trace buffer"
 458	select TRACER_MAX_TRACE
 459	help
 460	  Allow tracing users to take snapshot of the current buffer using the
 461	  ftrace interface, e.g.:
 462
 463	      echo 1 > /sys/kernel/debug/tracing/snapshot
 464	      cat snapshot
 465
 466config TRACER_SNAPSHOT_PER_CPU_SWAP
 467	bool "Allow snapshot to swap per CPU"
 468	depends on TRACER_SNAPSHOT
 469	select RING_BUFFER_ALLOW_SWAP
 470	help
 471	  Allow doing a snapshot of a single CPU buffer instead of a
 472	  full swap (all buffers). If this is set, then the following is
 473	  allowed:
 474
 475	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
 476
 477	  After which, only the tracing buffer for CPU 2 was swapped with
 478	  the main tracing buffer, and the other CPU buffers remain the same.
 479
 480	  When this is enabled, this adds a little more overhead to the
 481	  trace recording, as it needs to add some checks to synchronize
 482	  recording with swaps. But this does not affect the performance
 483	  of the overall system. This is enabled by default when the preempt
 484	  or irq latency tracers are enabled, as those need to swap as well
 485	  and already adds the overhead (plus a lot more).
 486
 487config TRACE_BRANCH_PROFILING
 488	bool
 489	select GENERIC_TRACER
 490
 491choice
 492	prompt "Branch Profiling"
 493	default BRANCH_PROFILE_NONE
 494	help
 495	 The branch profiling is a software profiler. It will add hooks
 496	 into the C conditionals to test which path a branch takes.
 497
 498	 The likely/unlikely profiler only looks at the conditions that
 499	 are annotated with a likely or unlikely macro.
 500
 501	 The "all branch" profiler will profile every if-statement in the
 502	 kernel. This profiler will also enable the likely/unlikely
 503	 profiler.
 504
 505	 Either of the above profilers adds a bit of overhead to the system.
 506	 If unsure, choose "No branch profiling".
 507
 508config BRANCH_PROFILE_NONE
 509	bool "No branch profiling"
 510	help
 511	  No branch profiling. Branch profiling adds a bit of overhead.
 512	  Only enable it if you want to analyse the branching behavior.
 513	  Otherwise keep it disabled.
 514
 515config PROFILE_ANNOTATED_BRANCHES
 516	bool "Trace likely/unlikely profiler"
 517	select TRACE_BRANCH_PROFILING
 518	help
 519	  This tracer profiles all likely and unlikely macros
 520	  in the kernel. It will display the results in:
 521
 522	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
 523
 524	  Note: this will add a significant overhead; only turn this
 525	  on if you need to profile the system's use of these macros.
 526
 527config PROFILE_ALL_BRANCHES
 528	bool "Profile all if conditionals" if !FORTIFY_SOURCE
 529	select TRACE_BRANCH_PROFILING
 530	help
 531	  This tracer profiles all branch conditions. Every if ()
 532	  taken in the kernel is recorded whether it hit or miss.
 533	  The results will be displayed in:
 534
 535	  /sys/kernel/debug/tracing/trace_stat/branch_all
 536
 537	  This option also enables the likely/unlikely profiler.
 538
 539	  This configuration, when enabled, will impose a great overhead
 540	  on the system. This should only be enabled when the system
 541	  is to be analyzed in much detail.
 542endchoice
 543
 544config TRACING_BRANCHES
 545	bool
 546	help
 547	  Selected by tracers that will trace the likely and unlikely
 548	  conditions. This prevents the tracers themselves from being
 549	  profiled. Profiling the tracing infrastructure can only happen
 550	  when the likelys and unlikelys are not being traced.
 551
 552config BRANCH_TRACER
 553	bool "Trace likely/unlikely instances"
 554	depends on TRACE_BRANCH_PROFILING
 555	select TRACING_BRANCHES
 556	help
 557	  This traces the events of likely and unlikely condition
 558	  calls in the kernel.  The difference between this and the
 559	  "Trace likely/unlikely profiler" is that this is not a
 560	  histogram of the callers, but actually places the calling
 561	  events into a running trace buffer to see when and where the
 562	  events happened, as well as their results.
 563
 564	  Say N if unsure.
 565
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 566config BLK_DEV_IO_TRACE
 567	bool "Support for tracing block IO actions"
 568	depends on SYSFS
 569	depends on BLOCK
 570	select RELAY
 571	select DEBUG_FS
 572	select TRACEPOINTS
 573	select GENERIC_TRACER
 574	select STACKTRACE
 575	help
 576	  Say Y here if you want to be able to trace the block layer actions
 577	  on a given queue. Tracing allows you to see any traffic happening
 578	  on a block device queue. For more information (and the userspace
 579	  support tools needed), fetch the blktrace tools from:
 580
 581	  git://git.kernel.dk/blktrace.git
 582
 583	  Tracing also is possible using the ftrace interface, e.g.:
 584
 585	    echo 1 > /sys/block/sda/sda1/trace/enable
 586	    echo blk > /sys/kernel/debug/tracing/current_tracer
 587	    cat /sys/kernel/debug/tracing/trace_pipe
 588
 589	  If unsure, say N.
 590
 591config KPROBE_EVENTS
 592	depends on KPROBES
 593	depends on HAVE_REGS_AND_STACK_ACCESS_API
 594	bool "Enable kprobes-based dynamic events"
 595	select TRACING
 596	select PROBE_EVENTS
 597	select DYNAMIC_EVENTS
 598	default y
 599	help
 600	  This allows the user to add tracing events (similar to tracepoints)
 601	  on the fly via the ftrace interface. See
 602	  Documentation/trace/kprobetrace.rst for more details.
 603
 604	  Those events can be inserted wherever kprobes can probe, and record
 605	  various register and memory values.
 606
 607	  This option is also required by perf-probe subcommand of perf tools.
 608	  If you want to use perf tools, this option is strongly recommended.
 609
 610config KPROBE_EVENTS_ON_NOTRACE
 611	bool "Do NOT protect notrace function from kprobe events"
 612	depends on KPROBE_EVENTS
 613	depends on DYNAMIC_FTRACE
 614	default n
 615	help
 616	  This is only for the developers who want to debug ftrace itself
 617	  using kprobe events.
 618
 619	  If kprobes can use ftrace instead of breakpoint, ftrace related
 620	  functions are protected from kprobe-events to prevent an infinite
 621	  recursion or any unexpected execution path which leads to a kernel
 622	  crash.
 623
 624	  This option disables such protection and allows you to put kprobe
 625	  events on ftrace functions for debugging ftrace by itself.
 626	  Note that this might let you shoot yourself in the foot.
 627
 628	  If unsure, say N.
 629
 630config UPROBE_EVENTS
 631	bool "Enable uprobes-based dynamic events"
 632	depends on ARCH_SUPPORTS_UPROBES
 633	depends on MMU
 634	depends on PERF_EVENTS
 635	select UPROBES
 636	select PROBE_EVENTS
 637	select DYNAMIC_EVENTS
 638	select TRACING
 639	default y
 640	help
 641	  This allows the user to add tracing events on top of userspace
 642	  dynamic events (similar to tracepoints) on the fly via the trace
 643	  events interface. Those events can be inserted wherever uprobes
 644	  can probe, and record various registers.
 645	  This option is required if you plan to use perf-probe subcommand
 646	  of perf tools on user space applications.
 647
 648config BPF_EVENTS
 649	depends on BPF_SYSCALL
 650	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
 651	bool
 652	default y
 653	help
 654	  This allows the user to attach BPF programs to kprobe, uprobe, and
 655	  tracepoint events.
 656
 657config DYNAMIC_EVENTS
 658	def_bool n
 659
 660config PROBE_EVENTS
 661	def_bool n
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 662
 663config BPF_KPROBE_OVERRIDE
 664	bool "Enable BPF programs to override a kprobed function"
 665	depends on BPF_EVENTS
 666	depends on FUNCTION_ERROR_INJECTION
 667	default n
 668	help
 669	 Allows BPF to override the execution of a probed function and
 670	 set a different return value.  This is used for error injection.
 
 
 
 
 
 
 671
 672config FTRACE_MCOUNT_RECORD
 673	def_bool y
 674	depends on DYNAMIC_FTRACE
 675	depends on HAVE_FTRACE_MCOUNT_RECORD
 676
 677config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 678	bool
 679	depends on FTRACE_MCOUNT_RECORD
 680
 681config FTRACE_MCOUNT_USE_CC
 682	def_bool y
 683	depends on $(cc-option,-mrecord-mcount)
 684	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 685	depends on FTRACE_MCOUNT_RECORD
 
 
 
 
 686
 687config FTRACE_MCOUNT_USE_OBJTOOL
 688	def_bool y
 689	depends on HAVE_OBJTOOL_MCOUNT
 690	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 691	depends on !FTRACE_MCOUNT_USE_CC
 692	depends on FTRACE_MCOUNT_RECORD
 693
 694config FTRACE_MCOUNT_USE_RECORDMCOUNT
 695	def_bool y
 696	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 697	depends on !FTRACE_MCOUNT_USE_CC
 698	depends on !FTRACE_MCOUNT_USE_OBJTOOL
 699	depends on FTRACE_MCOUNT_RECORD
 700
 701config TRACING_MAP
 702	bool
 703	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
 704	help
 705	  tracing_map is a special-purpose lock-free map for tracing,
 706	  separated out as a stand-alone facility in order to allow it
 707	  to be shared between multiple tracers.  It isn't meant to be
 708	  generally used outside of that context, and is normally
 709	  selected by tracers that use it.
 710
 711config SYNTH_EVENTS
 712	bool "Synthetic trace events"
 713	select TRACING
 714	select DYNAMIC_EVENTS
 715	default n
 716	help
 717	  Synthetic events are user-defined trace events that can be
 718	  used to combine data from other trace events or in fact any
 719	  data source.  Synthetic events can be generated indirectly
 720	  via the trace() action of histogram triggers or directly
 721	  by way of an in-kernel API.
 722
 723	  See Documentation/trace/events.rst or
 724	  Documentation/trace/histogram.rst for details and examples.
 725
 726	  If in doubt, say N.
 727
 728config HIST_TRIGGERS
 729	bool "Histogram triggers"
 730	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
 731	select TRACING_MAP
 732	select TRACING
 733	select DYNAMIC_EVENTS
 734	select SYNTH_EVENTS
 735	default n
 736	help
 737	  Hist triggers allow one or more arbitrary trace event fields
 738	  to be aggregated into hash tables and dumped to stdout by
 739	  reading a debugfs/tracefs file.  They're useful for
 740	  gathering quick and dirty (though precise) summaries of
 741	  event activity as an initial guide for further investigation
 742	  using more advanced tools.
 743
 744	  Inter-event tracing of quantities such as latencies is also
 745	  supported using hist triggers under this option.
 746
 747	  See Documentation/trace/histogram.rst.
 748	  If in doubt, say N.
 749
 750config TRACE_EVENT_INJECT
 751	bool "Trace event injection"
 752	depends on TRACING
 753	help
 754	  Allow user-space to inject a specific trace event into the ring
 755	  buffer. This is mainly used for testing purpose.
 
 756
 757	  If unsure, say N.
 758
 759config TRACEPOINT_BENCHMARK
 760	bool "Add tracepoint that benchmarks tracepoints"
 761	help
 762	 This option creates the tracepoint "benchmark:benchmark_event".
 763	 When the tracepoint is enabled, it kicks off a kernel thread that
 764	 goes into an infinite loop (calling cond_resched() to let other tasks
 765	 run), and calls the tracepoint. Each iteration will record the time
 766	 it took to write to the tracepoint and the next iteration that
 767	 data will be passed to the tracepoint itself. That is, the tracepoint
 768	 will report the time it took to do the previous tracepoint.
 769	 The string written to the tracepoint is a static string of 128 bytes
 770	 to keep the time the same. The initial string is simply a write of
 771	 "START". The second string records the cold cache time of the first
 772	 write which is not added to the rest of the calculations.
 773
 774	 As it is a tight loop, it benchmarks as hot cache. That's fine because
 775	 we care most about hot paths that are probably in cache already.
 776
 777	 An example of the output:
 778
 779	      START
 780	      first=3672 [COLD CACHED]
 781	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
 782	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
 783	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
 784	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
 785	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
 786	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
 787
 788
 789config RING_BUFFER_BENCHMARK
 790	tristate "Ring buffer benchmark stress tester"
 791	depends on RING_BUFFER
 792	help
 793	  This option creates a test to stress the ring buffer and benchmark it.
 794	  It creates its own ring buffer such that it will not interfere with
 795	  any other users of the ring buffer (such as ftrace). It then creates
 796	  a producer and consumer that will run for 10 seconds and sleep for
 797	  10 seconds. Each interval it will print out the number of events
 798	  it recorded and give a rough estimate of how long each iteration took.
 799
 800	  It does not disable interrupts or raise its priority, so it may be
 801	  affected by processes that are running.
 802
 803	  If unsure, say N.
 804
 805config TRACE_EVAL_MAP_FILE
 806       bool "Show eval mappings for trace events"
 807       depends on TRACING
 808       help
 809	The "print fmt" of the trace events will show the enum/sizeof names
 810	instead of their values. This can cause problems for user space tools
 811	that use this string to parse the raw data as user space does not know
 812	how to convert the string to its value.
 813
 814	To fix this, there's a special macro in the kernel that can be used
 815	to convert an enum/sizeof into its value. If this macro is used, then
 816	the print fmt strings will be converted to their values.
 817
 818	If something does not get converted properly, this option can be
 819	used to show what enums/sizeof the kernel tried to convert.
 820
 821	This option is for debugging the conversions. A file is created
 822	in the tracing directory called "eval_map" that will show the
 823	names matched with their values and what trace event system they
 824	belong to.
 825
 826	Normally, the mapping of the strings to values will be freed after
 827	boot up or module load. With this option, they will not be freed, as
 828	they are needed for the "eval_map" file. Enabling this option will
 829	increase the memory footprint of the running kernel.
 830
 831	If unsure, say N.
 832
 833config FTRACE_RECORD_RECURSION
 834	bool "Record functions that recurse in function tracing"
 835	depends on FUNCTION_TRACER
 836	help
 837	  All callbacks that attach to the function tracing have some sort
 838	  of protection against recursion. Even though the protection exists,
 839	  it adds overhead. This option will create a file in the tracefs
 840	  file system called "recursed_functions" that will list the functions
 841	  that triggered a recursion.
 842
 843	  This will add more overhead to cases that have recursion.
 844
 845	  If unsure, say N
 846
 847config FTRACE_RECORD_RECURSION_SIZE
 848	int "Max number of recursed functions to record"
 849	default	128
 850	depends on FTRACE_RECORD_RECURSION
 851	help
 852	  This defines the limit of number of functions that can be
 853	  listed in the "recursed_functions" file, that lists all
 854	  the functions that caused a recursion to happen.
 855	  This file can be reset, but the limit can not change in
 856	  size at runtime.
 857
 858config RING_BUFFER_RECORD_RECURSION
 859	bool "Record functions that recurse in the ring buffer"
 860	depends on FTRACE_RECORD_RECURSION
 861	# default y, because it is coupled with FTRACE_RECORD_RECURSION
 862	default y
 863	help
 864	  The ring buffer has its own internal recursion. Although when
 865	  recursion happens it won't cause harm because of the protection,
 866	  but it does cause an unwanted overhead. Enabling this option will
 867	  place the location where recursion was detected into the ftrace "recursed_functions"
 868	  file.
 869
 870	  This will add more overhead to cases that have recursion.
 871
 872config GCOV_PROFILE_FTRACE
 873	bool "Enable GCOV profiling on ftrace subsystem"
 874	depends on GCOV_KERNEL
 875	help
 876	  Enable GCOV profiling on ftrace subsystem for checking
 877	  which functions/lines are tested.
 878
 879	  If unsure, say N.
 880
 881	  Note that on a kernel compiled with this config, ftrace will
 882	  run significantly slower.
 883
 884config FTRACE_SELFTEST
 885	bool
 886
 887config FTRACE_STARTUP_TEST
 888	bool "Perform a startup test on ftrace"
 889	depends on GENERIC_TRACER
 890	select FTRACE_SELFTEST
 891	help
 892	  This option performs a series of startup tests on ftrace. On bootup
 893	  a series of tests are made to verify that the tracer is
 894	  functioning properly. It will do tests on all the configured
 895	  tracers of ftrace.
 896
 897config EVENT_TRACE_STARTUP_TEST
 898	bool "Run selftest on trace events"
 899	depends on FTRACE_STARTUP_TEST
 900	default y
 901	help
 902	  This option performs a test on all trace events in the system.
 903	  It basically just enables each event and runs some code that
 904	  will trigger events (not necessarily the event it enables).
 905	  This may take some time to run as there are a lot of events.
 906
 907config EVENT_TRACE_TEST_SYSCALLS
 908	bool "Run selftest on syscall events"
 909	depends on EVENT_TRACE_STARTUP_TEST
 910	help
 911	 This option will also enable testing every syscall event.
 912	 It only enables the event and disables it and runs various loads
 913	 with the event enabled. This adds a bit more time for kernel boot
 914	 up since it runs this on every system call defined.
 915
 916	 TBD - enable a way to actually call the syscalls as we test their
 917	       events
 918
 919config RING_BUFFER_STARTUP_TEST
 920       bool "Ring buffer startup self test"
 921       depends on RING_BUFFER
 922       help
 923	 Run a simple self test on the ring buffer on boot up. Late in the
 924	 kernel boot sequence, the test will start that kicks off
 925	 a thread per cpu. Each thread will write various size events
 926	 into the ring buffer. Another thread is created to send IPIs
 927	 to each of the threads, where the IPI handler will also write
 928	 to the ring buffer, to test/stress the nesting ability.
 929	 If any anomalies are discovered, a warning will be displayed
 930	 and all ring buffers will be disabled.
 931
 932	 The test runs for 10 seconds. This will slow your boot time
 933	 by at least 10 more seconds.
 934
 935	 At the end of the test, statistics and more checks are done.
 936	 It will output the stats of each per cpu buffer. What
 937	 was written, the sizes, what was read, what was lost, and
 938	 other similar details.
 939
 940	 If unsure, say N
 941
 942config RING_BUFFER_VALIDATE_TIME_DELTAS
 943	bool "Verify ring buffer time stamp deltas"
 944	depends on RING_BUFFER
 945	help
 946	  This will audit the time stamps on the ring buffer sub
 947	  buffer to make sure that all the time deltas for the
 948	  events on a sub buffer matches the current time stamp.
 949	  This audit is performed for every event that is not
 950	  interrupted, or interrupting another event. A check
 951	  is also made when traversing sub buffers to make sure
 952	  that all the deltas on the previous sub buffer do not
 953	  add up to be greater than the current time stamp.
 954
 955	  NOTE: This adds significant overhead to recording of events,
 956	  and should only be used to test the logic of the ring buffer.
 957	  Do not use it on production systems.
 958
 959	  Only say Y if you understand what this does, and you
 960	  still want it enabled. Otherwise say N
 
 961
 962config MMIOTRACE_TEST
 963	tristate "Test module for mmiotrace"
 964	depends on MMIOTRACE && m
 965	help
 966	  This is a dumb module for testing mmiotrace. It is very dangerous
 967	  as it will write garbage to IO memory starting at a given address.
 968	  However, it should be safe to use on e.g. unused portion of VRAM.
 969
 970	  Say N, unless you absolutely know what you are doing.
 
 
 
 971
 972config PREEMPTIRQ_DELAY_TEST
 973	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
 974	depends on m
 975	help
 976	  Select this option to build a test module that can help test latency
 977	  tracers by executing a preempt or irq disable section with a user
 978	  configurable delay. The module busy waits for the duration of the
 979	  critical section.
 980
 981	  For example, the following invocation generates a burst of three
 982	  irq-disabled critical sections for 500us:
 983	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
 984
 985	  What's more, if you want to attach the test on the cpu which the latency
 986	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
 987	  command.
 988
 989	  If unsure, say N
 990
 991config SYNTH_EVENT_GEN_TEST
 992	tristate "Test module for in-kernel synthetic event generation"
 993	depends on SYNTH_EVENTS
 994	help
 995          This option creates a test module to check the base
 996          functionality of in-kernel synthetic event definition and
 997          generation.
 998
 999          To test, insert the module, and then check the trace buffer
1000	  for the generated sample events.
1001
1002	  If unsure, say N.
1003
1004config KPROBE_EVENT_GEN_TEST
1005	tristate "Test module for in-kernel kprobe event generation"
1006	depends on KPROBE_EVENTS
1007	help
1008          This option creates a test module to check the base
1009          functionality of in-kernel kprobe event definition.
1010
1011          To test, insert the module, and then check the trace buffer
1012	  for the generated kprobe events.
1013
1014	  If unsure, say N.
1015
1016config HIST_TRIGGERS_DEBUG
1017	bool "Hist trigger debug support"
1018	depends on HIST_TRIGGERS
1019	help
1020          Add "hist_debug" file for each event, which when read will
1021          dump out a bunch of internal details about the hist triggers
1022          defined on that event.
1023
1024          The hist_debug file serves a couple of purposes:
1025
1026            - Helps developers verify that nothing is broken.
1027
1028            - Provides educational information to support the details
1029              of the hist trigger internals as described by
1030              Documentation/trace/histogram-design.rst.
1031
1032          The hist_debug output only covers the data structures
1033          related to the histogram definitions themselves and doesn't
1034          display the internals of map buckets or variable values of
1035          running histograms.
1036
1037          If unsure, say N.
1038
1039endif # FTRACE
1040
1041endif # TRACING_SUPPORT
1042