Linux kernel source: kernel/trace/trace_preemptirq.c, shown in two versions.
Version v6.2 follows below; version v6.13.7 follows after it.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * preemptoff and irqoff tracepoints
  4 *
  5 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
  6 */
  7
  8#include <linux/kallsyms.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/ftrace.h>
 12#include <linux/kprobes.h>
 
 13#include "trace.h"
 14
 15#define CREATE_TRACE_POINTS
 16#include <trace/events/preemptirq.h>
 17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 18#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Per-cpu flag: non-zero while this CPU's IRQs are traced as disabled.
 * Gates the enable/disable hooks below so redundant calls (IRQs already
 * off when a "disable" arrives, or already on for an "enable") emit no
 * duplicate tracepoints or tracer events.
 */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
 21
/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	/* Only act on the first enable after a traced disable. */
	if (this_cpu_read(tracing_irq_cpu)) {
		/*
		 * Plain (non-rcuidle) tracepoint — presumably RCU is
		 * watching at this entry-code call site; the tracepoint
		 * is skipped in NMI context.
		 */
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 39
/*
 * Full hardirq-enable hook: fires the irq_enable tracepoint and latency
 * tracer (once per traced off-section, gated by tracing_irq_cpu), then
 * performs the two-stage lockdep hardirq-on annotation.
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		/* rcuidle variant: safe when RCU is not watching; not NMI safe. */
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	/* Lockdep is told last, after the tracing hooks have run. */
	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
 54
 55/*
 56 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 57 * used in the low level entry code where the ordering vs. RCU is important
 58 * and lockdep uses a staged approach which splits the lockdep hardirq
 59 * tracking into a RCU on and a RCU off section.
 60 */
 61void trace_hardirqs_off_finish(void)
 62{
 63	if (!this_cpu_read(tracing_irq_cpu)) {
 64		this_cpu_write(tracing_irq_cpu, 1);
 65		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
 66		if (!in_nmi())
 67			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
 68	}
 69
 70}
 71EXPORT_SYMBOL(trace_hardirqs_off_finish);
 72NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
 73
/*
 * Full hardirq-disable hook: notifies lockdep first, then fires the
 * irq_disable tracepoint and latency tracer once per off-section
 * (gated by tracing_irq_cpu).
 */
void trace_hardirqs_off(void)
{
	/* Lockdep is informed before any tracepoint fires (mirror of _on). */
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		/* rcuidle variant is not NMI safe. */
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
 87
/*
 * Variant of trace_hardirqs_on() taking an explicit caller address
 * instead of deriving it via CALLER_ADDR1 — presumably for call sites
 * (e.g. architecture stubs) where the direct return address is not the
 * interesting one; TODO confirm against callers.
 */
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	/* Same two-stage lockdep annotation as trace_hardirqs_on(). */
	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
102
/*
 * Variant of trace_hardirqs_off() taking an explicit caller address
 * instead of CALLER_ADDR1; counterpart of trace_hardirqs_on_caller().
 */
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	/* Lockdep first, mirroring trace_hardirqs_off(). */
	lockdep_hardirqs_off(caller_addr);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
116#endif /* CONFIG_TRACE_IRQFLAGS */
117
118#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
119
/*
 * Preemption re-enabled hook: fires the preempt_enable tracepoint and
 * the latency tracer. a0/a1 are address arguments passed straight
 * through to both (callers elsewhere supply CALLER_ADDR-style values).
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	/* rcuidle tracepoint is not NMI safe. */
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}
126
/*
 * Preemption disabled hook: fires the preempt_disable tracepoint and
 * the latency tracer; counterpart of trace_preempt_on().
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	/* rcuidle tracepoint is not NMI safe. */
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
133#endif
--- Version v6.13.7 of the same file follows ---
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * preemptoff and irqoff tracepoints
  4 *
  5 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
  6 */
  7
  8#include <linux/kallsyms.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/ftrace.h>
 12#include <linux/kprobes.h>
 13#include <linux/hardirq.h>
 14#include "trace.h"
 15
 16#define CREATE_TRACE_POINTS
 17#include <trace/events/preemptirq.h>
 18
 19/*
 20 * Use regular trace points on architectures that implement noinstr
 21 * tooling: these calls will only happen with RCU enabled, which can
 22 * use a regular tracepoint.
 23 *
 24 * On older architectures, RCU may not be watching in idle. In that
 25 * case, wake up RCU to watch while calling the tracepoint. These
 26 * aren't NMI-safe - so exclude NMI contexts:
 27 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
/* noinstr-capable arch: RCU is guaranteed on here, plain tracepoint suffices. */
#define trace(point, args)	trace_##point(args)
#else
/*
 * Older arch: the tracepoint may fire while RCU is not watching (idle
 * task). Wrap the tracepoint in ct_irq_enter()/ct_irq_exit() in that
 * case; NMI context bails out entirely since, per the comment above,
 * this path is not NMI safe.
 */
#define trace(point, args)					\
	do {							\
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif
 48
 49#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Per-cpu flag: non-zero while this CPU's IRQs are traced as disabled.
 * Gates the hooks below so redundant enable/disable calls emit no
 * duplicate events.
 */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
 52
/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	/* Only act on the first enable after a traced disable. */
	if (this_cpu_read(tracing_irq_cpu)) {
		/* trace() handles NMI exclusion and RCU-idle internally. */
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
 69
/*
 * Full hardirq-enable hook: fires the irq_enable tracepoint and latency
 * tracer (once per traced off-section, gated by tracing_irq_cpu), then
 * performs the two-stage lockdep hardirq-on annotation.
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	/* Lockdep is told last, after the tracing hooks have run. */
	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
 83
 84/*
 85 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 86 * used in the low level entry code where the ordering vs. RCU is important
 87 * and lockdep uses a staged approach which splits the lockdep hardirq
 88 * tracking into a RCU on and a RCU off section.
 89 */
 90void trace_hardirqs_off_finish(void)
 91{
 92	if (!this_cpu_read(tracing_irq_cpu)) {
 93		this_cpu_write(tracing_irq_cpu, 1);
 94		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
 95		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
 
 96	}
 97
 98}
 99EXPORT_SYMBOL(trace_hardirqs_off_finish);
100NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
101
/*
 * Full hardirq-disable hook: notifies lockdep first, then fires the
 * irq_disable tracepoint and latency tracer once per off-section
 * (gated by tracing_irq_cpu).
 */
void trace_hardirqs_off(void)
{
	/* Lockdep is informed before any tracepoint fires (mirror of _on). */
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114#endif /* CONFIG_TRACE_IRQFLAGS */
115
116#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
117
/*
 * Preemption re-enabled hook: fires the preempt_enable tracepoint (via
 * the trace() wrapper, which handles NMI exclusion and RCU-idle) and the
 * latency tracer. a0/a1 are address arguments passed straight through.
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable, TP_ARGS(a0, a1));
	tracer_preempt_on(a0, a1);
}
123
/*
 * Preemption disabled hook: fires the preempt_disable tracepoint and
 * the latency tracer; counterpart of trace_preempt_on().
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable, TP_ARGS(a0, a1));
	tracer_preempt_off(a0, a1);
}
129#endif