kernel/trace/trace_preemptirq.c — as shipped in kernel v6.13.7:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * preemptoff and irqoff tracepoints
  4 *
  5 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
  6 */
  7
  8#include <linux/kallsyms.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/ftrace.h>
 12#include <linux/kprobes.h>
 13#include <linux/hardirq.h>
 14#include "trace.h"
 15
 16#define CREATE_TRACE_POINTS
 17#include <trace/events/preemptirq.h>
 18
/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, RCU may not be watching in idle. In that
 * case, wake up RCU to watch while calling the tracepoint. These
 * aren't NMI-safe - so exclude NMI contexts:
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
/* noinstr architectures: callers guarantee RCU is watching; plain call. */
#define trace(point, args)	trace_##point(args)
#else
/*
 * Legacy path: only do work when the tracepoint is enabled. Bail out in
 * NMI context (ct_irq_enter() below is not NMI-safe, per the comment
 * above). When running on the idle task, bracket the tracepoint with
 * ct_irq_enter()/ct_irq_exit() so RCU watches for the duration of the
 * event; the idle handling is compiled out entirely under TINY_RCU.
 */
#define trace(point, args)					\
	do {							\
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif
 48
 49#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Per-CPU flag: set to 1 when an irq-disable event has been traced on
 * this CPU and cleared when the matching irq-enable is traced. Prevents
 * redundant tracepoint/tracer calls when IRQs are already marked off.
 */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
 52
 53/*
 54 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 55 * used in the low level entry code where the ordering vs. RCU is important
 56 * and lockdep uses a staged approach which splits the lockdep hardirq
 57 * tracking into a RCU on and a RCU off section.
 58 */
 59void trace_hardirqs_on_prepare(void)
 60{
 61	if (this_cpu_read(tracing_irq_cpu)) {
 62		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
 63		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 64		this_cpu_write(tracing_irq_cpu, 0);
 65	}
 66}
 67EXPORT_SYMBOL(trace_hardirqs_on_prepare);
 68NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 69
/*
 * Full irq-enable hook. If an irq-disable event was recorded on this
 * CPU (tracing_irq_cpu set), emit the matching irq_enable tracepoint
 * and irqsoff-tracer hook and clear the flag. The lockdep hardirqs-on
 * transition then runs unconditionally, after the tracing calls —
 * compare trace_hardirqs_off(), where lockdep is updated first.
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
 83
 84/*
 85 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 86 * used in the low level entry code where the ordering vs. RCU is important
 87 * and lockdep uses a staged approach which splits the lockdep hardirq
 88 * tracking into a RCU on and a RCU off section.
 89 */
 90void trace_hardirqs_off_finish(void)
 91{
 92	if (!this_cpu_read(tracing_irq_cpu)) {
 93		this_cpu_write(tracing_irq_cpu, 1);
 94		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
 95		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
 96	}
 97
 98}
 99EXPORT_SYMBOL(trace_hardirqs_off_finish);
100NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
101
/*
 * Full irq-disable hook. lockdep_hardirqs_off() runs before any tracing
 * hooks (the mirror image of trace_hardirqs_on(), where lockdep runs
 * last). The tracepoint/tracer pair only fires on the first disable on
 * this CPU — the per-CPU flag suppresses redundant events until the
 * matching enable clears it.
 */
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
114#endif /* CONFIG_TRACE_IRQFLAGS */
115
116#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
117
/*
 * Hook for preemption being re-enabled: emit the preempt_enable
 * tracepoint (via the trace() wrapper above) then notify the preemptoff
 * tracer.
 *
 * @a0, @a1: addresses forwarded by the caller — presumably caller and
 * parent return addresses, matching the CALLER_ADDR0/CALLER_ADDR1 usage
 * in the irqflags handlers above (TODO confirm at the call sites).
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable, TP_ARGS(a0, a1));
	tracer_preempt_on(a0, a1);
}
123
/*
 * Hook for preemption being disabled: emit the preempt_disable
 * tracepoint (via the trace() wrapper above) then notify the preemptoff
 * tracer.
 *
 * @a0, @a1: addresses forwarded by the caller — presumably caller and
 * parent return addresses (see trace_preempt_on()); TODO confirm at the
 * call sites.
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable, TP_ARGS(a0, a1));
	tracer_preempt_off(a0, a1);
}
129#endif
The same file as shipped in kernel v6.8:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * preemptoff and irqoff tracepoints
  4 *
  5 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
  6 */
  7
  8#include <linux/kallsyms.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/ftrace.h>
 12#include <linux/kprobes.h>
 
 13#include "trace.h"
 14
 15#define CREATE_TRACE_POINTS
 16#include <trace/events/preemptirq.h>
 17
/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
/* noinstr architectures: RCU is watching, use the plain tracepoint. */
#define trace(point)	trace_##point
#else
/*
 * Legacy path: use the _rcuidle tracepoint variant and skip the event
 * entirely in NMI context, since the rcuidle machinery is not NMI-safe
 * (see the comment above).
 *
 * NOTE(review): the expansion is a bare "if (!in_nmi()) ..." with no
 * do/while(0) wrapper, so a call like trace(point)(args); must never be
 * the sole body of an un-braced if/else at the call site — all current
 * call sites in this file are plain statements, which is safe.
 */
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif
 31
 32#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Per-CPU flag: set to 1 when an irq-disable event has been traced on
 * this CPU and cleared when the matching irq-enable is traced. Prevents
 * redundant tracepoint/tracer calls when IRQs are already marked off.
 */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
 35
 36/*
 37 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 38 * used in the low level entry code where the ordering vs. RCU is important
 39 * and lockdep uses a staged approach which splits the lockdep hardirq
 40 * tracking into a RCU on and a RCU off section.
 41 */
 42void trace_hardirqs_on_prepare(void)
 43{
 44	if (this_cpu_read(tracing_irq_cpu)) {
 45		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 46		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 47		this_cpu_write(tracing_irq_cpu, 0);
 48	}
 49}
 50EXPORT_SYMBOL(trace_hardirqs_on_prepare);
 51NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 52
/*
 * Full irq-enable hook. If an irq-disable event was recorded on this
 * CPU (tracing_irq_cpu set), emit the matching irq_enable tracepoint
 * and irqsoff-tracer hook and clear the flag. The lockdep hardirqs-on
 * transition then runs unconditionally, after the tracing calls —
 * compare trace_hardirqs_off(), where lockdep is updated first.
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
 66
 67/*
 68 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 69 * used in the low level entry code where the ordering vs. RCU is important
 70 * and lockdep uses a staged approach which splits the lockdep hardirq
 71 * tracking into a RCU on and a RCU off section.
 72 */
 73void trace_hardirqs_off_finish(void)
 74{
 75	if (!this_cpu_read(tracing_irq_cpu)) {
 76		this_cpu_write(tracing_irq_cpu, 1);
 77		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
 78		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
 79	}
 80
 81}
 82EXPORT_SYMBOL(trace_hardirqs_off_finish);
 83NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
 84
/*
 * Full irq-disable hook. lockdep_hardirqs_off() runs before any tracing
 * hooks (the mirror image of trace_hardirqs_on(), where lockdep runs
 * last). The tracepoint/tracer pair only fires on the first disable on
 * this CPU — the per-CPU flag suppresses redundant events until the
 * matching enable clears it.
 */
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
 97#endif /* CONFIG_TRACE_IRQFLAGS */
 98
 99#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
100
/*
 * Hook for preemption being re-enabled: emit the preempt_enable
 * tracepoint (via the trace() wrapper above) then notify the preemptoff
 * tracer.
 *
 * @a0, @a1: addresses forwarded by the caller — presumably caller and
 * parent return addresses, matching the CALLER_ADDR0/CALLER_ADDR1 usage
 * in the irqflags handlers above (TODO confirm at the call sites).
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}
106
/*
 * Hook for preemption being disabled: emit the preempt_disable
 * tracepoint (via the trace() wrapper above) then notify the preemptoff
 * tracer.
 *
 * @a0, @a1: addresses forwarded by the caller — presumably caller and
 * parent return addresses (see trace_preempt_on()); TODO confirm at the
 * call sites.
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
112#endif