v4.10.11
 
  1#
  2# Architectures that offer a FUNCTION_TRACER implementation should
  3#  select HAVE_FUNCTION_TRACER:
  4#
  5
  6config USER_STACKTRACE_SUPPORT
  7	bool
  8
  9config NOP_TRACER
 10	bool
 11
 12config HAVE_FTRACE_NMI_ENTER
 13	bool
 14	help
 15	  See Documentation/trace/ftrace-design.txt
 16
 17config HAVE_FUNCTION_TRACER
 18	bool
 19	help
 20	  See Documentation/trace/ftrace-design.txt
 21
 22config HAVE_FUNCTION_GRAPH_TRACER
 23	bool
 24	help
 25	  See Documentation/trace/ftrace-design.txt
 26
 27config HAVE_DYNAMIC_FTRACE
 28	bool
 29	help
 30	  See Documentation/trace/ftrace-design.txt
 31
 32config HAVE_DYNAMIC_FTRACE_WITH_REGS
 33	bool
 34
 35config HAVE_FTRACE_MCOUNT_RECORD
 36	bool
 37	help
 38	  See Documentation/trace/ftrace-design.txt
 39
 40config HAVE_SYSCALL_TRACEPOINTS
 41	bool
 42	help
 43	  See Documentation/trace/ftrace-design.txt
 44
 45config HAVE_FENTRY
 46	bool
 47	help
 48	  Arch supports the gcc options -pg with -mfentry
 49
 50config HAVE_C_RECORDMCOUNT
 51	bool
 52	help
 53	  C version of recordmcount available?
 54
 55config TRACER_MAX_TRACE
 56	bool
 57
 58config TRACE_CLOCK
 59	bool
 60
 61config RING_BUFFER
 62	bool
 63	select TRACE_CLOCK
 64	select IRQ_WORK
 65
 66config FTRACE_NMI_ENTER
 67       bool
 68       depends on HAVE_FTRACE_NMI_ENTER
 69       default y
 70
 71config EVENT_TRACING
 72	select CONTEXT_SWITCH_TRACER
 73        select GLOB
 74	bool
 75
 76config CONTEXT_SWITCH_TRACER
 77	bool
 78
 79config RING_BUFFER_ALLOW_SWAP
 80	bool
 81	help
 82	 Allow the use of ring_buffer_swap_cpu.
 83	 Adds a very slight overhead to tracing when enabled.
 84
 85# All tracer options should select GENERIC_TRACER. For those options that are
 86# enabled by all tracers (context switch and event tracer) they select TRACING.
 87# This allows those options to appear when no other tracer is selected. But the
 88# options do not appear when something else selects it. We need the two options
 89# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
 90# hiding of the automatic options.
 91
 92config TRACING
 93	bool
 94	select DEBUG_FS
 95	select RING_BUFFER
 96	select STACKTRACE if STACKTRACE_SUPPORT
 97	select TRACEPOINTS
 98	select NOP_TRACER
 99	select BINARY_PRINTF
100	select EVENT_TRACING
101	select TRACE_CLOCK
102
103config GENERIC_TRACER
104	bool
105	select TRACING
106
107#
108# Minimum requirements an architecture has to meet for us to
109# be able to offer generic tracing facilities:
110#
111config TRACING_SUPPORT
112	bool
113	# PPC32 has no irqflags tracing support, but it can use most of the
114	# tracers anyway, they were tested to build and work. Note that new
115	# exceptions to this list aren't welcomed, better implement the
116	# irqflags tracing for your architecture.
117	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
118	depends on STACKTRACE_SUPPORT
119	default y
120
121if TRACING_SUPPORT
122
123menuconfig FTRACE
124	bool "Tracers"
125	default y if DEBUG_KERNEL
126	help
127	  Enable the kernel tracing infrastructure.
128
129if FTRACE
130
131config FUNCTION_TRACER
132	bool "Kernel Function Tracer"
133	depends on HAVE_FUNCTION_TRACER
134	select KALLSYMS
135	select GENERIC_TRACER
136	select CONTEXT_SWITCH_TRACER
137        select GLOB
138	help
139	  Enable the kernel to trace every kernel function. This is done
140	  by using a compiler feature to insert a small, 5-byte No-Operation
141	  instruction at the beginning of every kernel function. This NOP
142	  sequence is then dynamically patched into a tracer call when
143	  tracing is enabled by the administrator. If it's runtime disabled
144	  (the bootup default), then the overhead of the instructions is very
145	  small and not measurable even in micro-benchmarks.
146
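A minimal usage sketch, assuming debugfs/tracefs is mounted at /sys/kernel/debug as in the examples elsewhere in this file: the function tracer is selected at runtime through the current_tracer file.

      echo function > /sys/kernel/debug/tracing/current_tracer
      cat /sys/kernel/debug/tracing/trace
      echo nop > /sys/kernel/debug/tracing/current_tracer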
147config FUNCTION_GRAPH_TRACER
148	bool "Kernel Function Graph Tracer"
149	depends on HAVE_FUNCTION_GRAPH_TRACER
150	depends on FUNCTION_TRACER
151	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
152	default y
153	help
154	  Enable the kernel to trace a function at both its return
155	  and its entry.
156	  Its first purpose is to trace the duration of functions and
157	  draw a call graph for each thread with some information like
158	  the return value. This is done by setting the current return
159	  address on the current task structure into a stack of calls.
160
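A usage sketch for the graph tracer, under the same mount-point assumption:

      echo function_graph > /sys/kernel/debug/tracing/current_tracer
      cat /sys/kernel/debug/tracing/trace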
161
162config IRQSOFF_TRACER
163	bool "Interrupts-off Latency Tracer"
164	default n
165	depends on TRACE_IRQFLAGS_SUPPORT
166	depends on !ARCH_USES_GETTIMEOFFSET
167	select TRACE_IRQFLAGS
168	select GENERIC_TRACER
169	select TRACER_MAX_TRACE
170	select RING_BUFFER_ALLOW_SWAP
171	select TRACER_SNAPSHOT
172	select TRACER_SNAPSHOT_PER_CPU_SWAP
173	help
174	  This option measures the time spent in irqs-off critical
175	  sections, with microsecond accuracy.
176
177	  The default measurement method is a maximum search, which is
178	  disabled by default and can be runtime (re-)started
179	  via:
180
181	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
182
183	  (Note that kernel size and overhead increase with this option
184	  enabled. This option and the preempt-off timing option can be
185	  used together or separately.)
186
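An illustrative run, combining the current_tracer file with the tracing_max_latency reset shown above:

      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
      cat /sys/kernel/debug/tracing/tracing_max_latency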
187config PREEMPT_TRACER
188	bool "Preemption-off Latency Tracer"
189	default n
190	depends on !ARCH_USES_GETTIMEOFFSET
191	depends on PREEMPT
192	select GENERIC_TRACER
193	select TRACER_MAX_TRACE
194	select RING_BUFFER_ALLOW_SWAP
195	select TRACER_SNAPSHOT
196	select TRACER_SNAPSHOT_PER_CPU_SWAP
197	help
198	  This option measures the time spent in preemption-off critical
199	  sections, with microsecond accuracy.
200
201	  The default measurement method is a maximum search, which is
202	  disabled by default and can be runtime (re-)started
203	  via:
204
205	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
206
207	  (Note that kernel size and overhead increase with this option
208	  enabled. This option and the irqs-off timing option can be
209	  used together or separately.)
210
211config SCHED_TRACER
212	bool "Scheduling Latency Tracer"
213	select GENERIC_TRACER
214	select CONTEXT_SWITCH_TRACER
215	select TRACER_MAX_TRACE
216	select TRACER_SNAPSHOT
217	help
218	  This tracer tracks the latency of the highest priority task
219	  to be scheduled in, starting from the point it has woken up.
220
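A usage sketch; the scheduling latency tracer registers as "wakeup" (with a "wakeup_rt" variant for real-time tasks):

      echo wakeup > /sys/kernel/debug/tracing/current_tracer
      cat /sys/kernel/debug/tracing/tracing_max_latency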
221config HWLAT_TRACER
222	bool "Tracer to detect hardware latencies (like SMIs)"
223	select GENERIC_TRACER
224	help
225	 This tracer, when enabled, will create one or more kernel threads,
226	 depending on what the cpumask file is set to, with each thread
227	 spinning in a loop looking for interruptions caused by
228	 something other than the kernel. For example, if a
229	 System Management Interrupt (SMI) takes a noticeable amount of
230	 time, this tracer will detect it. This is useful for testing
231	 if a system is reliable for Real Time tasks.
232
233	 Some files are created in the tracing directory when this
234	 is enabled:
235
236	   hwlat_detector/width   - time in usecs for how long to spin for
237	   hwlat_detector/window  - time in usecs between the start of each
238				     iteration
239
240	 A kernel thread is created that will spin with interrupts disabled
241	 for "width" microseconds in every "window" cycle. It will not spin
242	 for "window - width" microseconds, where the system can
243	 continue to operate.
244
245	 The output will appear in the trace and trace_pipe files.
246
247	 When the tracer is not running, it has no effect on the system,
248	 but when it is running, it can cause the system to be
249	 periodically non responsive. Do not run this tracer on a
250	 production system.
251
252	 To enable this tracer, echo "hwlat" into the current_tracer
253	 file. Every time a latency is greater than tracing_thresh, it will
254	 be recorded into the ring buffer.
255
256config ENABLE_DEFAULT_TRACERS
257	bool "Trace process context switches and events"
258	depends on !GENERIC_TRACER
259	select TRACING
260	help
261	  This tracer hooks to various trace points in the kernel,
262	  allowing the user to pick and choose which trace point they
263	  want to trace. It also includes the sched_switch tracer plugin.
264
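For example, individual trace points are then switched on through the events directory (the sched_switch event here is just one of many):

      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
      cat /sys/kernel/debug/tracing/trace_pipe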
265config FTRACE_SYSCALLS
266	bool "Trace syscalls"
267	depends on HAVE_SYSCALL_TRACEPOINTS
268	select GENERIC_TRACER
269	select KALLSYMS
270	help
271	  Basic tracer to catch the syscall entry and exit events.
272
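A usage sketch; the exact event names under events/syscalls depend on the architecture's syscall table:

      echo 1 > /sys/kernel/debug/tracing/events/syscalls/sys_enter_openat/enable
      cat /sys/kernel/debug/tracing/trace_pipe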
273config TRACER_SNAPSHOT
274	bool "Create a snapshot trace buffer"
275	select TRACER_MAX_TRACE
276	help
277	  Allow tracing users to take snapshot of the current buffer using the
278	  ftrace interface, e.g.:
279
280	      echo 1 > /sys/kernel/debug/tracing/snapshot
281	      cat snapshot
282
283config TRACER_SNAPSHOT_PER_CPU_SWAP
284        bool "Allow snapshot to swap per CPU"
285	depends on TRACER_SNAPSHOT
286	select RING_BUFFER_ALLOW_SWAP
287	help
288	  Allow doing a snapshot of a single CPU buffer instead of a
289	  full swap (all buffers). If this is set, then the following is
290	  allowed:
291
292	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
293
294	  After which, only the tracing buffer for CPU 2 is swapped with
295	  the main tracing buffer, and the other CPU buffers remain the same.
296
297	  When this is enabled, this adds a little more overhead to the
298	  trace recording, as it needs to add some checks to synchronize
299	  recording with swaps. But this does not affect the performance
300	  of the overall system. This is enabled by default when the preempt
301	  or irq latency tracers are enabled, as those need to swap as well
302	  and already add the overhead (plus a lot more).
303
304config TRACE_BRANCH_PROFILING
305	bool
306	select GENERIC_TRACER
307
308choice
309	prompt "Branch Profiling"
310	default BRANCH_PROFILE_NONE
311	help
312	 The branch profiling is a software profiler. It will add hooks
313	 into the C conditionals to test which path a branch takes.
314
315	 The likely/unlikely profiler only looks at the conditions that
316	 are annotated with a likely or unlikely macro.
317
318	 The "all branch" profiler will profile every if-statement in the
319	 kernel. This profiler will also enable the likely/unlikely
320	 profiler.
321
322	 Either of the above profilers adds a bit of overhead to the system.
323	 If unsure, choose "No branch profiling".
324
325config BRANCH_PROFILE_NONE
326	bool "No branch profiling"
327	help
328	  No branch profiling. Branch profiling adds a bit of overhead.
329	  Only enable it if you want to analyse the branching behavior.
330	  Otherwise keep it disabled.
331
332config PROFILE_ANNOTATED_BRANCHES
333	bool "Trace likely/unlikely profiler"
334	select TRACE_BRANCH_PROFILING
335	help
336	  This tracer profiles all likely and unlikely macros
337	  in the kernel. It will display the results in:
338
339	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
340
341	  Note: this will add a significant overhead; only turn this
342	  on if you need to profile the system's use of these macros.
343
344config PROFILE_ALL_BRANCHES
345	bool "Profile all if conditionals"
346	select TRACE_BRANCH_PROFILING
347	help
348	  This tracer profiles all branch conditions. Every if ()
349	  taken in the kernel is recorded whether it hit or miss.
350	  The results will be displayed in:
351
352	  /sys/kernel/debug/tracing/trace_stat/branch_all
353
354	  This option also enables the likely/unlikely profiler.
355
356	  This configuration, when enabled, will impose a great overhead
357	  on the system. This should only be enabled when the system
358	  is to be analyzed in much detail.
359endchoice
360
361config TRACING_BRANCHES
362	bool
363	help
364	  Selected by tracers that will trace the likely and unlikely
365	  conditions. This prevents the tracers themselves from being
366	  profiled. Profiling the tracing infrastructure can only happen
367	  when the likelys and unlikelys are not being traced.
368
369config BRANCH_TRACER
370	bool "Trace likely/unlikely instances"
371	depends on TRACE_BRANCH_PROFILING
372	select TRACING_BRANCHES
373	help
374	  This traces the events of likely and unlikely condition
375	  calls in the kernel.  The difference between this and the
376	  "Trace likely/unlikely profiler" is that this is not a
377	  histogram of the callers, but actually places the calling
378	  events into a running trace buffer to see when and where the
379	  events happened, as well as their results.
380
381	  Say N if unsure.
382
383config STACK_TRACER
384	bool "Trace max stack"
385	depends on HAVE_FUNCTION_TRACER
386	select FUNCTION_TRACER
387	select STACKTRACE
388	select KALLSYMS
389	help
390	  This special tracer records the maximum stack footprint of the
391	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
392
393	  This tracer works by hooking into every function call that the
394	  kernel executes, and keeping a maximum stack depth value and
395	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
396	  then it will not have any overhead while the stack tracer
397	  is disabled.
398
399	  To enable the stack tracer on bootup, pass in 'stacktrace'
400	  on the kernel command line.
401
402	  The stack tracer can also be enabled or disabled via the
403	  sysctl kernel.stack_tracer_enabled
404
405	  Say N if unsure.
406
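A usage sketch with the sysctl and output file named above:

      sysctl kernel.stack_tracer_enabled=1
      cat /sys/kernel/debug/tracing/stack_trace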
407config BLK_DEV_IO_TRACE
408	bool "Support for tracing block IO actions"
409	depends on SYSFS
410	depends on BLOCK
411	select RELAY
412	select DEBUG_FS
413	select TRACEPOINTS
414	select GENERIC_TRACER
415	select STACKTRACE
416	help
417	  Say Y here if you want to be able to trace the block layer actions
418	  on a given queue. Tracing allows you to see any traffic happening
419	  on a block device queue. For more information (and the userspace
420	  support tools needed), fetch the blktrace tools from:
421
422	  git://git.kernel.dk/blktrace.git
423
424	  Tracing also is possible using the ftrace interface, e.g.:
425
426	    echo 1 > /sys/block/sda/sda1/trace/enable
427	    echo blk > /sys/kernel/debug/tracing/current_tracer
428	    cat /sys/kernel/debug/tracing/trace_pipe
429
430	  If unsure, say N.
431
432config KPROBE_EVENT
433	depends on KPROBES
434	depends on HAVE_REGS_AND_STACK_ACCESS_API
435	bool "Enable kprobes-based dynamic events"
436	select TRACING
437	select PROBE_EVENTS
438	default y
439	help
440	  This allows the user to add tracing events (similar to tracepoints)
441	  on the fly via the ftrace interface. See
442	  Documentation/trace/kprobetrace.txt for more details.
443
444	  Those events can be inserted wherever kprobes can probe, and record
445	  various register and memory values.
446
447	  This option is also required by perf-probe subcommand of perf tools.
448	  If you want to use perf tools, this option is strongly recommended.
449
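A minimal sketch of the kprobe_events interface described in Documentation/trace/kprobetrace.txt; the probe name and symbol here are only examples:

      echo 'p:myprobe do_sys_open' >> /sys/kernel/debug/tracing/kprobe_events
      echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
      cat /sys/kernel/debug/tracing/trace_pipe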
450config UPROBE_EVENT
451	bool "Enable uprobes-based dynamic events"
452	depends on ARCH_SUPPORTS_UPROBES
453	depends on MMU
454	depends on PERF_EVENTS
455	select UPROBES
456	select PROBE_EVENTS
457	select TRACING
458	default n
459	help
460	  This allows the user to add tracing events on top of userspace
461	  dynamic events (similar to tracepoints) on the fly via the trace
462	  events interface. Those events can be inserted wherever uprobes
463	  can probe, and record various registers.
464	  This option is required if you plan to use perf-probe subcommand
465	  of perf tools on user space applications.
466
467config BPF_EVENTS
468	depends on BPF_SYSCALL
469	depends on (KPROBE_EVENT || UPROBE_EVENT) && PERF_EVENTS
470	bool
471	default y
472	help
473	  This allows the user to attach BPF programs to kprobe events.
474
475config PROBE_EVENTS
476	def_bool n
477
478config DYNAMIC_FTRACE
479	bool "enable/disable function tracing dynamically"
480	depends on FUNCTION_TRACER
481	depends on HAVE_DYNAMIC_FTRACE
482	default y
483	help
484	  This option will modify all the calls to function tracing
485	  dynamically (will patch them out of the binary image and
486	  replace them with a No-Op instruction) on boot up. During
487	  compile time, a table is made of all the locations that ftrace
488	  can function trace, and this table is linked into the kernel
489	  image. When this is enabled, functions can be individually
490	  enabled, and the functions not enabled will not affect
491	  performance of the system.
492
493	  See the files in /sys/kernel/debug/tracing:
494	    available_filter_functions
495	    set_ftrace_filter
496	    set_ftrace_notrace
497
498	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
499	  otherwise has native performance as long as no tracing is active.
500
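For instance, with the filter files listed above, tracing can be limited to a subset of functions (the wildcard pattern is only an example):

      echo 'vfs_*' > /sys/kernel/debug/tracing/set_ftrace_filter
      echo function > /sys/kernel/debug/tracing/current_tracer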
501config DYNAMIC_FTRACE_WITH_REGS
502	def_bool y
503	depends on DYNAMIC_FTRACE
504	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
505
506config FUNCTION_PROFILER
507	bool "Kernel function profiler"
508	depends on FUNCTION_TRACER
509	default n
510	help
511	  This option enables the kernel function profiler. A file is created
512	  in debugfs called function_profile_enabled which defaults to zero.
513	  When a 1 is echoed into this file profiling begins, and when a
514	  zero is entered, profiling stops. A "functions" file is created in
515	  the trace_stat directory; this file shows the list of functions that
516	  have been hit and their counters.
517
518	  If in doubt, say N.
519
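A usage sketch with the files named above; trace_stat holds one function<N> file per CPU:

      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
      cat /sys/kernel/debug/tracing/trace_stat/function0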
520config FTRACE_MCOUNT_RECORD
521	def_bool y
522	depends on DYNAMIC_FTRACE
523	depends on HAVE_FTRACE_MCOUNT_RECORD
524
525config FTRACE_SELFTEST
526	bool
527
528config FTRACE_STARTUP_TEST
529	bool "Perform a startup test on ftrace"
530	depends on GENERIC_TRACER
531	select FTRACE_SELFTEST
532	help
533	  This option performs a series of startup tests on ftrace. On bootup
534	  a series of tests are made to verify that the tracer is
535	  functioning properly. It will do tests on all the configured
536	  tracers of ftrace.
537
538config EVENT_TRACE_TEST_SYSCALLS
539	bool "Run selftest on syscall events"
540	depends on FTRACE_STARTUP_TEST
541	help
542	 This option will also enable testing every syscall event.
543	 It only enables the event and disables it and runs various loads
544	 with the event enabled. This adds a bit more time for kernel boot
545	 up since it runs this on every system call defined.
546
547	 TBD - enable a way to actually call the syscalls as we test their
548	       events
549
550config MMIOTRACE
551	bool "Memory mapped IO tracing"
552	depends on HAVE_MMIOTRACE_SUPPORT && PCI
553	select GENERIC_TRACER
554	help
555	  Mmiotrace traces Memory Mapped I/O access and is meant for
556	  debugging and reverse engineering. It is called from the ioremap
557	  implementation and works via page faults. Tracing is disabled by
558	  default and can be enabled at run-time.
559
560	  See Documentation/trace/mmiotrace.txt.
561	  If you are not helping to develop drivers, say N.
562
563config TRACING_MAP
564	bool
565	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
566	help
567	  tracing_map is a special-purpose lock-free map for tracing,
568	  separated out as a stand-alone facility in order to allow it
569	  to be shared between multiple tracers.  It isn't meant to be
570	  generally used outside of that context, and is normally
571	  selected by tracers that use it.
572
573config HIST_TRIGGERS
574	bool "Histogram triggers"
575	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
576	select TRACING_MAP
577	select TRACING
578	default n
579	help
580	  Hist triggers allow one or more arbitrary trace event fields
581	  to be aggregated into hash tables and dumped to stdout by
582	  reading a debugfs/tracefs file.  They're useful for
583	  gathering quick and dirty (though precise) summaries of
584	  event activity as an initial guide for further investigation
585	  using more advanced tools.
586
587	  See Documentation/trace/events.txt.
588	  If in doubt, say N.
589
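A minimal hist trigger sketch, following the kmalloc example from the tracing documentation (the event and field names are taken from that example):

      echo 'hist:keys=call_site:values=bytes_req' > /sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
      cat /sys/kernel/debug/tracing/events/kmem/kmalloc/hist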
590config MMIOTRACE_TEST
591	tristate "Test module for mmiotrace"
592	depends on MMIOTRACE && m
593	help
594	  This is a dumb module for testing mmiotrace. It is very dangerous
595	  as it will write garbage to IO memory starting at a given address.
596	  However, it should be safe to use on e.g. unused portion of VRAM.
597
598	  Say N, unless you absolutely know what you are doing.
599
600config TRACEPOINT_BENCHMARK
601        bool "Add tracepoint that benchmarks tracepoints"
602	help
603	 This option creates the tracepoint "benchmark:benchmark_event".
604	 When the tracepoint is enabled, it kicks off a kernel thread that
605	 goes into an infinite loop (calling cond_resched() to let other tasks
606	 run), and calls the tracepoint. Each iteration will record the time
607	 it took to write to the tracepoint and the next iteration that
608	 data will be passed to the tracepoint itself. That is, the tracepoint
609	 will report the time it took to do the previous tracepoint.
610	 The string written to the tracepoint is a static string of 128 bytes
611	 to keep the time the same. The initial string is simply a write of
612	 "START". The second string records the cold cache time of the first
613	 write which is not added to the rest of the calculations.
614
615	 As it is a tight loop, it benchmarks as hot cache. That's fine because
616	 we care most about hot paths that are probably in cache already.
617
618	 An example of the output:
619
620	      START
621	      first=3672 [COLD CACHED]
622	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
623	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
624	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
625	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
626	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
627	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
628
629
630config RING_BUFFER_BENCHMARK
631	tristate "Ring buffer benchmark stress tester"
632	depends on RING_BUFFER
633	help
634	  This option creates a test to stress the ring buffer and benchmark it.
635	  It creates its own ring buffer such that it will not interfere with
636	  any other users of the ring buffer (such as ftrace). It then creates
637	  a producer and consumer that will run for 10 seconds and sleep for
638	  10 seconds. Each interval it will print out the number of events
639	  it recorded and give a rough estimate of how long each iteration took.
640
641	  It does not disable interrupts or raise its priority, so it may be
642	  affected by processes that are running.
643
644	  If unsure, say N.
645
646config RING_BUFFER_STARTUP_TEST
647       bool "Ring buffer startup self test"
648       depends on RING_BUFFER
649       help
650         Run a simple self test on the ring buffer on boot up. Late in the
651	 kernel boot sequence, the test will start and kick off
652	 a thread per cpu. Each thread will write various size events
653	 into the ring buffer. Another thread is created to send IPIs
654	 to each of the threads, where the IPI handler will also write
655	 to the ring buffer, to test/stress the nesting ability.
656	 If any anomalies are discovered, a warning will be displayed
657	 and all ring buffers will be disabled.
658
659	 The test runs for 10 seconds. This will slow your boot time
660	 by at least 10 more seconds.
661
662	 At the end of the test, statistics and more checks are done.
663	 It will output the stats of each per cpu buffer: what
664	 was written, the sizes, what was read, what was lost, and
665	 other similar details.
666
667	 If unsure, say N
668
669config TRACE_ENUM_MAP_FILE
670       bool "Show enum mappings for trace events"
671       depends on TRACING
672       help
673        The "print fmt" of the trace events will show the enum names instead
674	of their values. This can cause problems for user space tools that
675	use this string to parse the raw data as user space does not know
676	how to convert the string to its value.
677
678	To fix this, there's a special macro in the kernel that can be used
679	to convert the enum into its value. If this macro is used, then the
680	print fmt strings will have the enums converted to their values.
681
682	If something does not get converted properly, this option can be
683	used to show what enums the kernel tried to convert.
684
685	This option is for debugging the enum conversions. A file is created
686	in the tracing directory called "enum_map" that will show the enum
687	names matched with their values and what trace event system they
688	belong to.
689
690	Normally, the mapping of the strings to values will be freed after
691	boot up or module load. With this option, they will not be freed, as
692	they are needed for the "enum_map" file. Enabling this option will
693	increase the memory footprint of the running kernel.
694
695	If unsure, say N
696
697config TRACING_EVENTS_GPIO
698	bool "Trace gpio events"
699	depends on GPIOLIB
700	default y
701	help
702	  Enable tracing events for gpio subsystem
703
704endif # FTRACE
705
706endif # TRACING_SUPPORT
707
v5.14.15
   1# SPDX-License-Identifier: GPL-2.0-only
   2#
   3# Architectures that offer a FUNCTION_TRACER implementation should
   4#  select HAVE_FUNCTION_TRACER:
   5#
   6
   7config USER_STACKTRACE_SUPPORT
   8	bool
   9
  10config NOP_TRACER
  11	bool
  12
  13config HAVE_FUNCTION_TRACER
  14	bool
  15	help
  16	  See Documentation/trace/ftrace-design.rst
  17
  18config HAVE_FUNCTION_GRAPH_TRACER
  19	bool
  20	help
  21	  See Documentation/trace/ftrace-design.rst
  22
  23config HAVE_DYNAMIC_FTRACE
  24	bool
  25	help
  26	  See Documentation/trace/ftrace-design.rst
  27
  28config HAVE_DYNAMIC_FTRACE_WITH_REGS
  29	bool
  30
  31config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
  32	bool
  33
  34config HAVE_DYNAMIC_FTRACE_WITH_ARGS
  35	bool
  36	help
  37	 If this is set, then arguments and stack can be found from
  38	 the pt_regs passed into the function callback regs parameter
  39	 by default, even without setting the REGS flag in the ftrace_ops.
  40	 This allows for use of regs_get_kernel_argument() and
  41	 kernel_stack_pointer().
  42
  43config HAVE_FTRACE_MCOUNT_RECORD
  44	bool
  45	help
  46	  See Documentation/trace/ftrace-design.rst
  47
  48config HAVE_SYSCALL_TRACEPOINTS
  49	bool
  50	help
  51	  See Documentation/trace/ftrace-design.rst
  52
  53config HAVE_FENTRY
  54	bool
  55	help
  56	  Arch supports the gcc options -pg with -mfentry
  57
  58config HAVE_NOP_MCOUNT
  59	bool
  60	help
  61	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount
  62
  63config HAVE_OBJTOOL_MCOUNT
  64	bool
  65	help
  66	  Arch supports objtool --mcount
  67
  68config HAVE_C_RECORDMCOUNT
  69	bool
  70	help
  71	  C version of recordmcount available?
  72
  73config TRACER_MAX_TRACE
  74	bool
  75
  76config TRACE_CLOCK
  77	bool
  78
  79config RING_BUFFER
  80	bool
  81	select TRACE_CLOCK
  82	select IRQ_WORK
  83
  84config EVENT_TRACING
  85	select CONTEXT_SWITCH_TRACER
  86	select GLOB
  87	bool
  88
  89config CONTEXT_SWITCH_TRACER
  90	bool
  91
  92config RING_BUFFER_ALLOW_SWAP
  93	bool
  94	help
  95	 Allow the use of ring_buffer_swap_cpu.
  96	 Adds a very slight overhead to tracing when enabled.
  97
  98config PREEMPTIRQ_TRACEPOINTS
  99	bool
 100	depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS
 101	select TRACING
 102	default y
 103	help
 104	  Create preempt/irq toggle tracepoints if needed, so that other parts
 105	  of the kernel can use them to generate or add hooks to them.
 106
 107# All tracer options should select GENERIC_TRACER. For those options that are
 108# enabled by all tracers (context switch and event tracer) they select TRACING.
 109# This allows those options to appear when no other tracer is selected. But the
 110# options do not appear when something else selects it. We need the two options
 111# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
 112# hiding of the automatic options.
 113
 114config TRACING
 115	bool
 116	select RING_BUFFER
 117	select STACKTRACE if STACKTRACE_SUPPORT
 118	select TRACEPOINTS
 119	select NOP_TRACER
 120	select BINARY_PRINTF
 121	select EVENT_TRACING
 122	select TRACE_CLOCK
 123
 124config GENERIC_TRACER
 125	bool
 126	select TRACING
 127
 128#
 129# Minimum requirements an architecture has to meet for us to
 130# be able to offer generic tracing facilities:
 131#
 132config TRACING_SUPPORT
 133	bool
 134	depends on TRACE_IRQFLAGS_SUPPORT
 135	depends on STACKTRACE_SUPPORT
 136	default y
 137
 138if TRACING_SUPPORT
 139
 140menuconfig FTRACE
 141	bool "Tracers"
 142	default y if DEBUG_KERNEL
 143	help
 144	  Enable the kernel tracing infrastructure.
 145
 146if FTRACE
 147
 148config BOOTTIME_TRACING
 149	bool "Boot-time Tracing support"
 150	depends on TRACING
 151	select BOOT_CONFIG
 152	help
 153	  Enable developer to setup ftrace subsystem via supplemental
 154	  kernel cmdline at boot time for debugging (tracing) driver
 155	  initialization and boot process.
 156
 157config FUNCTION_TRACER
 158	bool "Kernel Function Tracer"
 159	depends on HAVE_FUNCTION_TRACER
 160	select KALLSYMS
 161	select GENERIC_TRACER
 162	select CONTEXT_SWITCH_TRACER
 163	select GLOB
 164	select TASKS_RCU if PREEMPTION
 165	select TASKS_RUDE_RCU
 166	help
 167	  Enable the kernel to trace every kernel function. This is done
 168	  by using a compiler feature to insert a small, 5-byte No-Operation
 169	  instruction at the beginning of every kernel function. This NOP
 170	  sequence is then dynamically patched into a tracer call when
 171	  tracing is enabled by the administrator. If it's runtime disabled
 172	  (the bootup default), then the overhead of the instructions is very
 173	  small and not measurable even in micro-benchmarks.
 174
 175config FUNCTION_GRAPH_TRACER
 176	bool "Kernel Function Graph Tracer"
 177	depends on HAVE_FUNCTION_GRAPH_TRACER
 178	depends on FUNCTION_TRACER
 179	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
 180	default y
 181	help
 182	  Enable the kernel to trace a function at both its return
 183	  and its entry.
 184	  Its first purpose is to trace the duration of functions and
 185	  draw a call graph for each thread with some information like
 186	  the return value. This is done by setting the current return
 187	  address on the current task structure into a stack of calls.
 188
 189config DYNAMIC_FTRACE
 190	bool "enable/disable function tracing dynamically"
 191	depends on FUNCTION_TRACER
 192	depends on HAVE_DYNAMIC_FTRACE
 193	default y
 194	help
 195	  This option will modify all the calls to function tracing
 196	  dynamically (will patch them out of the binary image and
 197	  replace them with a No-Op instruction) on boot up. During
 198	  compile time, a table is made of all the locations that ftrace
 199	  can function trace, and this table is linked into the kernel
 200	  image. When this is enabled, functions can be individually
 201	  enabled, and the functions not enabled will not affect
 202	  performance of the system.
 203
 204	  See the files in /sys/kernel/debug/tracing:
 205	    available_filter_functions
 206	    set_ftrace_filter
 207	    set_ftrace_notrace
 208
 209	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 210	  otherwise has native performance as long as no tracing is active.
 211
 212config DYNAMIC_FTRACE_WITH_REGS
 213	def_bool y
 214	depends on DYNAMIC_FTRACE
 215	depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
 216
 217config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 218	def_bool y
 219	depends on DYNAMIC_FTRACE_WITH_REGS
 220	depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 221
 222config DYNAMIC_FTRACE_WITH_ARGS
 223	def_bool y
 224	depends on DYNAMIC_FTRACE
 225	depends on HAVE_DYNAMIC_FTRACE_WITH_ARGS
 226
 227config FUNCTION_PROFILER
 228	bool "Kernel function profiler"
 229	depends on FUNCTION_TRACER
 230	default n
 231	help
 232	  This option enables the kernel function profiler. A file is created
 233	  in debugfs called function_profile_enabled which defaults to zero.
 234	  When a 1 is echoed into this file profiling begins, and when a
 235	  zero is entered, profiling stops. A "functions" file is created in
 236	  the trace_stat directory; this file shows the list of functions that
 237	  have been hit and their counters.
 238
 239	  If in doubt, say N.
 240
 241config STACK_TRACER
 242	bool "Trace max stack"
 243	depends on HAVE_FUNCTION_TRACER
 244	select FUNCTION_TRACER
 245	select STACKTRACE
 246	select KALLSYMS
 247	help
 248	  This special tracer records the maximum stack footprint of the
 249	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
 250
 251	  This tracer works by hooking into every function call that the
 252	  kernel executes, and keeping a maximum stack depth value and
 253	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
 254	  then it will not have any overhead while the stack tracer
 255	  is disabled.
 256
 257	  To enable the stack tracer on bootup, pass in 'stacktrace'
 258	  on the kernel command line.
 259
 260	  The stack tracer can also be enabled or disabled via the
 261	  sysctl kernel.stack_tracer_enabled
 262
 263	  Say N if unsure.
 264
 265config TRACE_PREEMPT_TOGGLE
 266	bool
 267	help
 268	  Enables hooks which will be called when preemption is first disabled,
 269	  and last enabled.
 270
 271config IRQSOFF_TRACER
 272	bool "Interrupts-off Latency Tracer"
 273	default n
 274	depends on TRACE_IRQFLAGS_SUPPORT
 275	select TRACE_IRQFLAGS
 276	select GENERIC_TRACER
 277	select TRACER_MAX_TRACE
 278	select RING_BUFFER_ALLOW_SWAP
 279	select TRACER_SNAPSHOT
 280	select TRACER_SNAPSHOT_PER_CPU_SWAP
 281	help
 282	  This option measures the time spent in irqs-off critical
 283	  sections, with microsecond accuracy.
 284
 285	  The default measurement method is a maximum search, which is
 286	  disabled by default and can be runtime (re-)started
 287	  via:
 288
 289	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 290
 291	  (Note that kernel size and overhead increase with this option
 292	  enabled. This option and the preempt-off timing option can be
 293	  used together or separately.)
 294
 295config PREEMPT_TRACER
 296	bool "Preemption-off Latency Tracer"
 297	default n
 298	depends on PREEMPTION
 299	select GENERIC_TRACER
 300	select TRACER_MAX_TRACE
 301	select RING_BUFFER_ALLOW_SWAP
 302	select TRACER_SNAPSHOT
 303	select TRACER_SNAPSHOT_PER_CPU_SWAP
 304	select TRACE_PREEMPT_TOGGLE
 305	help
 306	  This option measures the time spent in preemption-off critical
 307	  sections, with microsecond accuracy.
 308
 309	  The default measurement method is a maximum search, which is
 310	  disabled by default and can be runtime (re-)started
 311	  via:
 312
 313	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 314
 315	  (Note that kernel size and overhead increase with this option
 316	  enabled. This option and the irqs-off timing option can be
 317	  used together or separately.)
 318
 319config SCHED_TRACER
 320	bool "Scheduling Latency Tracer"
 321	select GENERIC_TRACER
 322	select CONTEXT_SWITCH_TRACER
 323	select TRACER_MAX_TRACE
 324	select TRACER_SNAPSHOT
 325	help
 326	  This tracer tracks the latency of the highest priority task
 327	  to be scheduled in, starting from the point it has woken up.
 328
 329config HWLAT_TRACER
 330	bool "Tracer to detect hardware latencies (like SMIs)"
 331	select GENERIC_TRACER
 332	help
 333	 This tracer, when enabled, will create one or more kernel threads,
 334	 depending on what the cpumask file is set to, with each thread
 335	 spinning in a loop looking for interruptions caused by
 336	 something other than the kernel. For example, if a
 337	 System Management Interrupt (SMI) takes a noticeable amount of
 338	 time, this tracer will detect it. This is useful for testing
 339	 if a system is reliable for Real Time tasks.
 340
 341	 Some files are created in the tracing directory when this
 342	 is enabled:
 343
 344	   hwlat_detector/width   - time in usecs for how long to spin for
 345	   hwlat_detector/window  - time in usecs between the start of each
 346				     iteration
 347
 348	 A kernel thread is created that will spin with interrupts disabled
 349	 for "width" microseconds in every "window" cycle. It will not spin
 350	 for "window - width" microseconds, where the system can
 351	 continue to operate.
 352
 353	 The output will appear in the trace and trace_pipe files.
 354
 355	 When the tracer is not running, it has no effect on the system,
 356	 but when it is running, it can cause the system to be
 357	 periodically non responsive. Do not run this tracer on a
 358	 production system.
 359
 360	 To enable this tracer, echo "hwlat" into the current_tracer
 361	 file. Every time a latency is greater than tracing_thresh, it will
 362	 be recorded into the ring buffer.
 363
 364config OSNOISE_TRACER
 365	bool "OS Noise tracer"
 366	select GENERIC_TRACER
 367	help
 368	  In the context of high-performance computing (HPC), the Operating
 369	  System Noise (osnoise) refers to the interference experienced by an
 370	  application due to activities inside the operating system. In the
 371	  context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
 372	  can cause noise to the system. Moreover, hardware-related jobs can
 373	  also cause noise, for example, via SMIs.
 374
 375	  The osnoise tracer leverages the hwlat_detector by running a similar
 376	  loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
 377	  the sources of osnoise during its execution. The osnoise tracer takes
 378	  note of the entry and exit point of any source of interferences,
 379	  increasing a per-cpu interference counter. It saves an interference
 380	  counter for each source of interference. The interference counter for
 381	  NMI, IRQs, SoftIRQs, and threads is increased anytime the tool
 382	  observes these interferences' entry events. When a noise happens
 383	  without any interference from the operating system level, the
 384	  hardware noise counter increases, pointing to a hardware-related
 385	  noise. In this way, osnoise can account for any source of
 386	  interference. At the end of the period, the osnoise tracer prints
 387	  the sum of all noise, the max single noise, the percentage of CPU
 388	  available for the thread, and the counters for the noise sources.
 389
 390	  In addition to the tracer, a set of tracepoints were added to
 391	  facilitate the identification of the osnoise source.
 392
 393	  The output will appear in the trace and trace_pipe files.
 394
 395	  To enable this tracer, echo "osnoise" into the current_tracer
 396	  file.
 397
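A usage sketch matching the description above:

      echo osnoise > /sys/kernel/debug/tracing/current_tracer
      cat /sys/kernel/debug/tracing/trace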
 398config TIMERLAT_TRACER
 399	bool "Timerlat tracer"
 400	select OSNOISE_TRACER
 401	select GENERIC_TRACER
 402	help
 403	  The timerlat tracer aims to help the preemptive kernel developers
 404	  to find sources of wakeup latencies of real-time threads.
 405
 406	  The tracer creates a per-cpu kernel thread with real-time priority.
 407	  The tracer thread sets a periodic timer to wakeup itself, and goes
 408	  to sleep waiting for the timer to fire. At the wakeup, the thread
 409	  then computes a wakeup latency value as the difference between
 410	  the current time and the absolute time that the timer was set
 411	  to expire.
 412
 413	  The tracer prints two lines at every activation. The first is the
 414	  timer latency observed at the hardirq context before the
 415	  activation of the thread. The second is the timer latency observed
 416	  by the thread, which is the same level that cyclictest reports. The
 417	  ACTIVATION ID field serves to relate the irq execution to its
 418	  respective thread execution.
 419
 420	  The tracer is built on top of the osnoise tracer, and the osnoise:
 421	  events can be used to trace the source of interference from NMI,
 422	  IRQs and other threads. It also enables the capture of the
 423	  stacktrace at the IRQ context, which helps to identify the code
 424	  path that can cause thread delay.
 425
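A usage sketch; timerlat is selected like any other tracer:

      echo timerlat > /sys/kernel/debug/tracing/current_tracer
      cat /sys/kernel/debug/tracing/trace_pipe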
 426config MMIOTRACE
 427	bool "Memory mapped IO tracing"
 428	depends on HAVE_MMIOTRACE_SUPPORT && PCI
 429	select GENERIC_TRACER
 430	help
 431	  Mmiotrace traces Memory Mapped I/O access and is meant for
 432	  debugging and reverse engineering. It is called from the ioremap
 433	  implementation and works via page faults. Tracing is disabled by
 434	  default and can be enabled at run-time.
 435
 436	  See Documentation/trace/mmiotrace.rst.
 437	  If you are not helping to develop drivers, say N.
 438
 439config ENABLE_DEFAULT_TRACERS
 440	bool "Trace process context switches and events"
 441	depends on !GENERIC_TRACER
 442	select TRACING
 443	help
 444	  This tracer hooks to various trace points in the kernel,
 445	  allowing the user to pick and choose which trace point they
 446	  want to trace. It also includes the sched_switch tracer plugin.
 447
 448config FTRACE_SYSCALLS
 449	bool "Trace syscalls"
 450	depends on HAVE_SYSCALL_TRACEPOINTS
 451	select GENERIC_TRACER
 452	select KALLSYMS
 453	help
 454	  Basic tracer to catch the syscall entry and exit events.
 455
 456config TRACER_SNAPSHOT
 457	bool "Create a snapshot trace buffer"
 458	select TRACER_MAX_TRACE
 459	help
 460	  Allow tracing users to take snapshot of the current buffer using the
 461	  ftrace interface, e.g.:
 462
 463	      echo 1 > /sys/kernel/debug/tracing/snapshot
 464	      cat snapshot
 465
 466config TRACER_SNAPSHOT_PER_CPU_SWAP
 467	bool "Allow snapshot to swap per CPU"
 468	depends on TRACER_SNAPSHOT
 469	select RING_BUFFER_ALLOW_SWAP
 470	help
 471	  Allow doing a snapshot of a single CPU buffer instead of a
 472	  full swap (all buffers). If this is set, then the following is
 473	  allowed:
 474
 475	      echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
 476
 477	  After which, only the tracing buffer for CPU 2 is swapped with
 478	  the main tracing buffer, and the other CPU buffers remain the same.
 479
 480	  When this is enabled, this adds a little more overhead to the
 481	  trace recording, as it needs to add some checks to synchronize
 482	  recording with swaps. But this does not affect the performance
 483	  of the overall system. This is enabled by default when the preempt
 484	  or irq latency tracers are enabled, as those need to swap as well
 485	  and already add the overhead (plus a lot more).
 486
 487config TRACE_BRANCH_PROFILING
 488	bool
 489	select GENERIC_TRACER
 490
 491choice
 492	prompt "Branch Profiling"
 493	default BRANCH_PROFILE_NONE
 494	help
 495	 The branch profiling is a software profiler. It will add hooks
 496	 into the C conditionals to test which path a branch takes.
 497
 498	 The likely/unlikely profiler only looks at the conditions that
 499	 are annotated with a likely or unlikely macro.
 500
 501	 The "all branch" profiler will profile every if-statement in the
 502	 kernel. This profiler will also enable the likely/unlikely
 503	 profiler.
 504
 505	 Either of the above profilers adds a bit of overhead to the system.
 506	 If unsure, choose "No branch profiling".
 507
 508config BRANCH_PROFILE_NONE
 509	bool "No branch profiling"
 510	help
 511	  No branch profiling. Branch profiling adds a bit of overhead.
 512	  Only enable it if you want to analyse the branching behavior.
 513	  Otherwise keep it disabled.
 514
 515config PROFILE_ANNOTATED_BRANCHES
 516	bool "Trace likely/unlikely profiler"
 517	select TRACE_BRANCH_PROFILING
 518	help
 519	  This tracer profiles all likely and unlikely macros
 520	  in the kernel. It will display the results in:
 521
 522	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
 523
 524	  Note: this will add a significant overhead; only turn this
 525	  on if you need to profile the system's use of these macros.
 526
 527config PROFILE_ALL_BRANCHES
 528	bool "Profile all if conditionals" if !FORTIFY_SOURCE
 529	select TRACE_BRANCH_PROFILING
 530	help
 531	  This tracer profiles all branch conditions. Every if ()
 532	  taken in the kernel is recorded, whether it was hit or missed.
 533	  The results will be displayed in:
 534
 535	  /sys/kernel/debug/tracing/trace_stat/branch_all
 536
 537	  This option also enables the likely/unlikely profiler.
 538
 539	  This configuration, when enabled, will impose a great overhead
 540	  on the system. This should only be enabled when the system
 541	  is to be analyzed in much detail.
 542endchoice
 543
 544config TRACING_BRANCHES
 545	bool
 546	help
 547	  Selected by tracers that will trace the likely and unlikely
 548	  conditions. This prevents the tracers themselves from being
 549	  profiled. Profiling the tracing infrastructure can only happen
 550	  when the likelys and unlikelys are not being traced.
 551
 552config BRANCH_TRACER
 553	bool "Trace likely/unlikely instances"
 554	depends on TRACE_BRANCH_PROFILING
 555	select TRACING_BRANCHES
 556	help
 557	  This traces the events of likely and unlikely condition
 558	  calls in the kernel.  The difference between this and the
 559	  "Trace likely/unlikely profiler" is that this is not a
 560	  histogram of the callers, but actually places the calling
 561	  events into a running trace buffer to see when and where the
 562	  events happened, as well as their results.
 563
 564	  Say N if unsure.
 565
 566config BLK_DEV_IO_TRACE
 567	bool "Support for tracing block IO actions"
 568	depends on SYSFS
 569	depends on BLOCK
 570	select RELAY
 571	select DEBUG_FS
 572	select TRACEPOINTS
 573	select GENERIC_TRACER
 574	select STACKTRACE
 575	help
 576	  Say Y here if you want to be able to trace the block layer actions
 577	  on a given queue. Tracing allows you to see any traffic happening
 578	  on a block device queue. For more information (and the userspace
 579	  support tools needed), fetch the blktrace tools from:
 580
 581	  git://git.kernel.dk/blktrace.git
 582
 583	  Tracing also is possible using the ftrace interface, e.g.:
 584
 585	    echo 1 > /sys/block/sda/sda1/trace/enable
 586	    echo blk > /sys/kernel/debug/tracing/current_tracer
 587	    cat /sys/kernel/debug/tracing/trace_pipe
 588
 589	  If unsure, say N.
 590
 591config KPROBE_EVENTS
 592	depends on KPROBES
 593	depends on HAVE_REGS_AND_STACK_ACCESS_API
 594	bool "Enable kprobes-based dynamic events"
 595	select TRACING
 596	select PROBE_EVENTS
 597	select DYNAMIC_EVENTS
 598	default y
 599	help
 600	  This allows the user to add tracing events (similar to tracepoints)
 601	  on the fly via the ftrace interface. See
 602	  Documentation/trace/kprobetrace.rst for more details.
 603
 604	  Those events can be inserted wherever kprobes can probe, and record
 605	  various register and memory values.
 606
 607	  This option is also required by perf-probe subcommand of perf tools.
 608	  If you want to use perf tools, this option is strongly recommended.
 609
 610config KPROBE_EVENTS_ON_NOTRACE
 611	bool "Do NOT protect notrace function from kprobe events"
 612	depends on KPROBE_EVENTS
 613	depends on DYNAMIC_FTRACE
 614	default n
 615	help
 616	  This is only for the developers who want to debug ftrace itself
 617	  using kprobe events.
 618
 619	  If kprobes can use ftrace instead of breakpoint, ftrace related
 620	  functions are protected from kprobe-events to prevent an infinite
 621	  recursion or any unexpected execution path which leads to a kernel
 622	  crash.
 623
 624	  This option disables such protection and allows you to put kprobe
 625	  events on ftrace functions for debugging ftrace by itself.
 626	  Note that this might let you shoot yourself in the foot.
 627
 628	  If unsure, say N.
 629
 630config UPROBE_EVENTS
 631	bool "Enable uprobes-based dynamic events"
 632	depends on ARCH_SUPPORTS_UPROBES
 633	depends on MMU
 634	depends on PERF_EVENTS
 635	select UPROBES
 636	select PROBE_EVENTS
 637	select DYNAMIC_EVENTS
 638	select TRACING
 639	default y
 640	help
 641	  This allows the user to add tracing events on top of userspace
 642	  dynamic events (similar to tracepoints) on the fly via the trace
 643	  events interface. Those events can be inserted wherever uprobes
 644	  can probe, and record various registers.
 645	  This option is required if you plan to use perf-probe subcommand
 646	  of perf tools on user space applications.
 647
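A minimal sketch of the uprobe_events interface (see Documentation/trace/uprobetracer.rst); the binary path and offset below are placeholders:

      echo 'p:myuprobe /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
      echo 1 > /sys/kernel/debug/tracing/events/uprobes/myuprobe/enable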
 648config BPF_EVENTS
 649	depends on BPF_SYSCALL
 650	depends on (KPROBE_EVENTS || UPROBE_EVENTS) && PERF_EVENTS
 651	bool
 652	default y
 653	help
 654	  This allows the user to attach BPF programs to kprobe, uprobe, and
 655	  tracepoint events.
 656
 657config DYNAMIC_EVENTS
 658	def_bool n
 659
 660config PROBE_EVENTS
 661	def_bool n
 662
 663config BPF_KPROBE_OVERRIDE
 664	bool "Enable BPF programs to override a kprobed function"
 665	depends on BPF_EVENTS
 666	depends on FUNCTION_ERROR_INJECTION
 667	default n
 668	help
 669	 Allows BPF to override the execution of a probed function and
 670	 set a different return value.  This is used for error injection.
 671
 672config FTRACE_MCOUNT_RECORD
 673	def_bool y
 674	depends on DYNAMIC_FTRACE
 675	depends on HAVE_FTRACE_MCOUNT_RECORD
 676
 677config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 678	bool
 679	depends on FTRACE_MCOUNT_RECORD
 680
 681config FTRACE_MCOUNT_USE_CC
 682	def_bool y
 683	depends on $(cc-option,-mrecord-mcount)
 684	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 685	depends on FTRACE_MCOUNT_RECORD
 686
 687config FTRACE_MCOUNT_USE_OBJTOOL
 688	def_bool y
 689	depends on HAVE_OBJTOOL_MCOUNT
 690	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 691	depends on !FTRACE_MCOUNT_USE_CC
 692	depends on FTRACE_MCOUNT_RECORD
 693
 694config FTRACE_MCOUNT_USE_RECORDMCOUNT
 695	def_bool y
 696	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
 697	depends on !FTRACE_MCOUNT_USE_CC
 698	depends on !FTRACE_MCOUNT_USE_OBJTOOL
 699	depends on FTRACE_MCOUNT_RECORD
 700
 701config TRACING_MAP
 702	bool
 703	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
 704	help
 705	  tracing_map is a special-purpose lock-free map for tracing,
 706	  separated out as a stand-alone facility in order to allow it
 707	  to be shared between multiple tracers.  It isn't meant to be
 708	  generally used outside of that context, and is normally
 709	  selected by tracers that use it.
 710
 711config SYNTH_EVENTS
 712	bool "Synthetic trace events"
 713	select TRACING
 714	select DYNAMIC_EVENTS
 715	default n
 716	help
 717	  Synthetic events are user-defined trace events that can be
 718	  used to combine data from other trace events or in fact any
 719	  data source.  Synthetic events can be generated indirectly
 720	  via the trace() action of histogram triggers or directly
 721	  by way of an in-kernel API.
 722
 723	  See Documentation/trace/events.rst or
 724	  Documentation/trace/histogram.rst for details and examples.
 725
 726	  If in doubt, say N.
 727
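A sketch of defining a synthetic event through tracefs; the event and field names are only an example taken from the histogram documentation:

      echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> /sys/kernel/debug/tracing/synthetic_events
      cat /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/format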
 728config HIST_TRIGGERS
 729	bool "Histogram triggers"
 730	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
 731	select TRACING_MAP
 732	select TRACING
 733	select DYNAMIC_EVENTS
 734	select SYNTH_EVENTS
 735	default n
 736	help
 737	  Hist triggers allow one or more arbitrary trace event fields
 738	  to be aggregated into hash tables and dumped to stdout by
 739	  reading a debugfs/tracefs file.  They're useful for
 740	  gathering quick and dirty (though precise) summaries of
 741	  event activity as an initial guide for further investigation
 742	  using more advanced tools.
 743
 744	  Inter-event tracing of quantities such as latencies is also
 745	  supported using hist triggers under this option.
 746
 747	  See Documentation/trace/histogram.rst.
 748	  If in doubt, say N.
 749
 750config TRACE_EVENT_INJECT
 751	bool "Trace event injection"
 752	depends on TRACING
 753	help
 754	  Allow user-space to inject a specific trace event into the ring
 755	  buffer. This is mainly used for testing purposes.
 756
 757	  If unsure, say N.
 758
 759config TRACEPOINT_BENCHMARK
 760	bool "Add tracepoint that benchmarks tracepoints"
 761	help
 762	 This option creates the tracepoint "benchmark:benchmark_event".
 763	 When the tracepoint is enabled, it kicks off a kernel thread that
 764	 goes into an infinite loop (calling cond_resched() to let other tasks
 765	 run), and calls the tracepoint. Each iteration will record the time
 766	 it took to write to the tracepoint and the next iteration that
 767	 data will be passed to the tracepoint itself. That is, the tracepoint
 768	 will report the time it took to do the previous tracepoint.
 769	 The string written to the tracepoint is a static string of 128 bytes
 770	 to keep the time the same. The initial string is simply a write of
 771	 "START". The second string records the cold cache time of the first
 772	 write which is not added to the rest of the calculations.
 773
 774	 As it is a tight loop, it benchmarks as hot cache. That's fine because
 775	 we care most about hot paths that are probably in cache already.
 776
 777	 An example of the output:
 778
 779	      START
 780	      first=3672 [COLD CACHED]
 781	      last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
 782	      last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
 783	      last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
 784	      last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
 785	      last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
 786	      last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
 787
 788
 789config RING_BUFFER_BENCHMARK
 790	tristate "Ring buffer benchmark stress tester"
 791	depends on RING_BUFFER
 792	help
 793	  This option creates a test to stress the ring buffer and benchmark it.
 794	  It creates its own ring buffer such that it will not interfere with
 795	  any other users of the ring buffer (such as ftrace). It then creates
 796	  a producer and consumer that will run for 10 seconds and sleep for
 797	  10 seconds. Each interval it will print out the number of events
 798	  it recorded and give a rough estimate of how long each iteration took.
 799
 800	  It does not disable interrupts or raise its priority, so it may be
 801	  affected by processes that are running.
 802
 803	  If unsure, say N.
 804
 805config TRACE_EVAL_MAP_FILE
 806       bool "Show eval mappings for trace events"
 807       depends on TRACING
 808       help
 809	The "print fmt" of the trace events will show the enum/sizeof names
 810	instead of their values. This can cause problems for user space tools
 811	that use this string to parse the raw data as user space does not know
 812	how to convert the string to its value.
 813
 814	To fix this, there's a special macro in the kernel that can be used
 815	to convert an enum/sizeof into its value. If this macro is used, then
 816	the print fmt strings will be converted to their values.
 817
 818	If something does not get converted properly, this option can be
 819	used to show what enums/sizeof the kernel tried to convert.
 820
 821	This option is for debugging the conversions. A file is created
 822	in the tracing directory called "eval_map" that will show the
 823	names matched with their values and what trace event system they
 824	belong to.
 825
 826	Normally, the mapping of the strings to values will be freed after
 827	boot up or module load. With this option, they will not be freed, as
 828	they are needed for the "eval_map" file. Enabling this option will
 829	increase the memory footprint of the running kernel.
 830
 831	If unsure, say N.
 832
 833config FTRACE_RECORD_RECURSION
 834	bool "Record functions that recurse in function tracing"
 835	depends on FUNCTION_TRACER
 836	help
 837	  All callbacks that attach to the function tracing have some sort
 838	  of protection against recursion. Even though the protection exists,
 839	  it adds overhead. This option will create a file in the tracefs
 840	  file system called "recursed_functions" that will list the functions
 841	  that triggered a recursion.
 842
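	  For example (assuming tracefs is mounted at /sys/kernel/tracing):

	    cat /sys/kernel/tracing/recursed_functions
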
 843	  This will add more overhead to cases that have recursion.
 844
 845	  If unsure, say N
 846
 847config FTRACE_RECORD_RECURSION_SIZE
 848	int "Max number of recursed functions to record"
 849	default	128
 850	depends on FTRACE_RECORD_RECURSION
 851	help
  852	  This defines the limit on the number of functions that can be
  853	  listed in the "recursed_functions" file, which lists all
  854	  the functions that caused a recursion to happen.
  855	  The file can be reset, but the limit cannot be changed
  856	  at runtime.
 857
 858config RING_BUFFER_RECORD_RECURSION
 859	bool "Record functions that recurse in the ring buffer"
 860	depends on FTRACE_RECORD_RECURSION
 861	# default y, because it is coupled with FTRACE_RECORD_RECURSION
 862	default y
 863	help
  864	  The ring buffer has its own internal recursion protection. When
  865	  recursion happens it won't cause harm because of that protection,
  866	  but it does add unwanted overhead. Enabling this option will
  867	  record the places where recursion was detected in the ftrace
  868	  "recursed_functions" file.
 869
 870	  This will add more overhead to cases that have recursion.
 871
 872config GCOV_PROFILE_FTRACE
 873	bool "Enable GCOV profiling on ftrace subsystem"
 874	depends on GCOV_KERNEL
 875	help
 876	  Enable GCOV profiling on ftrace subsystem for checking
 877	  which functions/lines are tested.
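
	  With GCOV_KERNEL, the coverage data is exported through debugfs; as
	  a sketch (the exact path mirrors your kernel build directory and is
	  shown here only as a placeholder):

	    ls /sys/kernel/debug/gcov/<path-to-build-dir>/kernel/trace/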
 878
 879	  If unsure, say N.
 880
 881	  Note that on a kernel compiled with this config, ftrace will
 882	  run significantly slower.
 883
 884config FTRACE_SELFTEST
 885	bool
 886
 887config FTRACE_STARTUP_TEST
 888	bool "Perform a startup test on ftrace"
 889	depends on GENERIC_TRACER
 890	select FTRACE_SELFTEST
 891	help
  892	  This option performs a series of startup tests on ftrace. On bootup,
  893	  these tests verify that the tracer is
  894	  functioning properly. They cover all the configured
 895	  tracers of ftrace.
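
	  The results of these self tests appear in the kernel log during
	  boot; as a sketch (the exact message format varies between kernel
	  versions):

	    dmesg | grep -i "Testing tracer"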
 896
 897config EVENT_TRACE_STARTUP_TEST
 898	bool "Run selftest on trace events"
 899	depends on FTRACE_STARTUP_TEST
 900	default y
 901	help
 902	  This option performs a test on all trace events in the system.
 903	  It basically just enables each event and runs some code that
  904	  will trigger events (not necessarily the event it enables).
  905	  This may take some time to run as there are a lot of events.
 906
 907config EVENT_TRACE_TEST_SYSCALLS
 908	bool "Run selftest on syscall events"
 909	depends on EVENT_TRACE_STARTUP_TEST
 910	help
 911	 This option will also enable testing every syscall event.
  912	 It only enables each event, runs various loads with the event
  913	 enabled, and then disables it. This adds a bit more time to
  914	 kernel boot up since it runs this on every system call defined.
 915
 916	 TBD - enable a way to actually call the syscalls as we test their
 917	       events
 918
 919config RING_BUFFER_STARTUP_TEST
  920	bool "Ring buffer startup self test"
  921	depends on RING_BUFFER
  922	help
 923	 Run a simple self test on the ring buffer on boot up. Late in the
  924	 kernel boot sequence, the test starts and kicks off
 925	 a thread per cpu. Each thread will write various size events
 926	 into the ring buffer. Another thread is created to send IPIs
 927	 to each of the threads, where the IPI handler will also write
 928	 to the ring buffer, to test/stress the nesting ability.
 929	 If any anomalies are discovered, a warning will be displayed
 930	 and all ring buffers will be disabled.
 931
 932	 The test runs for 10 seconds. This will slow your boot time
 933	 by at least 10 more seconds.
 934
  935	 At the end of the test, statistics and more checks are done.
  936	 It will output the stats of each per-cpu buffer: what
  937	 was written, the sizes, what was read, what was lost, and
 938	 other similar details.
 939
 940	 If unsure, say N
 941
 942config RING_BUFFER_VALIDATE_TIME_DELTAS
 943	bool "Verify ring buffer time stamp deltas"
 944	depends on RING_BUFFER
 945	help
 946	  This will audit the time stamps on the ring buffer sub
 947	  buffer to make sure that all the time deltas for the
  948	  events on a sub buffer match the current time stamp.
  949	  This audit is performed for every event that is neither
  950	  interrupted by, nor interrupting, another event. A check
 951	  is also made when traversing sub buffers to make sure
 952	  that all the deltas on the previous sub buffer do not
 953	  add up to be greater than the current time stamp.
 954
 955	  NOTE: This adds significant overhead to recording of events,
 956	  and should only be used to test the logic of the ring buffer.
 957	  Do not use it on production systems.
 958
 959	  Only say Y if you understand what this does, and you
  960	  still want it enabled. Otherwise say N
 961
 962config MMIOTRACE_TEST
 963	tristate "Test module for mmiotrace"
 964	depends on MMIOTRACE && m
 965	help
 966	  This is a dumb module for testing mmiotrace. It is very dangerous
 967	  as it will write garbage to IO memory starting at a given address.
  968	  However, it should be safe to use on e.g. an unused portion of VRAM.
 969
  970	  Say N, unless you absolutely know what you are doing.
 971
 972config PREEMPTIRQ_DELAY_TEST
 973	tristate "Test module to create a preempt / IRQ disable delay thread to test latency tracers"
 974	depends on m
 975	help
 976	  Select this option to build a test module that can help test latency
 977	  tracers by executing a preempt or irq disable section with a user
 978	  configurable delay. The module busy waits for the duration of the
 979	  critical section.
 980
 981	  For example, the following invocation generates a burst of three
 982	  irq-disabled critical sections for 500us:
 983	  modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
 984
  985	  In addition, if you want to run the test on the cpu that the latency
 986	  tracer is running on, specify cpu_affinity=cpu_num at the end of the
 987	  command.
 988
 989	  If unsure, say N
 990
 991config SYNTH_EVENT_GEN_TEST
 992	tristate "Test module for in-kernel synthetic event generation"
 993	depends on SYNTH_EVENTS
 994	help
  995	  This option creates a test module to check the base
  996	  functionality of in-kernel synthetic event definition and
  997	  generation.
  998
  999	  To test, insert the module, and then check the trace buffer
1000	  for the generated sample events.
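
	  As a sketch (the module name is assumed from this option and the
	  tracefs mount point is assumed to be /sys/kernel/tracing):

	    modprobe synth_event_gen_test
	    cat /sys/kernel/tracing/trace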
1001
1002	  If unsure, say N.
1003
1004config KPROBE_EVENT_GEN_TEST
1005	tristate "Test module for in-kernel kprobe event generation"
1006	depends on KPROBE_EVENTS
1007	help
 1008	  This option creates a test module to check the base
 1009	  functionality of in-kernel kprobe event definition.
 1010
 1011	  To test, insert the module, and then check the trace buffer
1012	  for the generated kprobe events.
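
	  As a sketch (the module name is assumed from this option and the
	  tracefs mount point is assumed to be /sys/kernel/tracing):

	    modprobe kprobe_event_gen_test
	    cat /sys/kernel/tracing/trace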
1013
1014	  If unsure, say N.
1015
1016config HIST_TRIGGERS_DEBUG
1017	bool "Hist trigger debug support"
1018	depends on HIST_TRIGGERS
1019	help
1020          Add "hist_debug" file for each event, which when read will
1021          dump out a bunch of internal details about the hist triggers
1022          defined on that event.
1023
1024          The hist_debug file serves a couple of purposes:
1025
1026            - Helps developers verify that nothing is broken.
1027
1028            - Provides educational information to support the details
1029              of the hist trigger internals as described by
1030              Documentation/trace/histogram-design.rst.
1031
1032          The hist_debug output only covers the data structures
1033          related to the histogram definitions themselves and doesn't
1034          display the internals of map buckets or variable values of
1035          running histograms.
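
          For example (path assumed; any event that has a hist trigger
          defined will have the file):

            cat /sys/kernel/tracing/events/sched/sched_switch/hist_debug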
1036
1037          If unsure, say N.
1038
1039endif # FTRACE
1040
1041endif # TRACING_SUPPORT
1042