v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
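
/*
 * Illustrative note (not from the upstream file): per the comment above,
 * the low level entry code is expected to pair this function with the
 * split lockdep calls, roughly as sketched below. The real call sites
 * live in the arch/entry code, not in this file.
 *
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare();
 *	instrumentation_end();
 *	...
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 */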

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}

}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
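
/*
 * Illustrative note (not from the upstream file): on the disable side
 * the ordering is mirrored. Lockdep is informed first, and the
 * tracepoint side runs later, once the entry code has reached a context
 * where instrumentation is allowed again, roughly:
 *
 *	lockdep_hardirqs_off(CALLER_ADDR0);
 *	...
 *	instrumentation_begin();
 *	trace_hardirqs_off_finish();
 */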

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

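/*
 * Illustrative note (not from the upstream file): the *_caller variants
 * below take an explicit caller address instead of deriving it with
 * CALLER_ADDR1, for callers (historically arch-level thunks) that have
 * already computed the address of interest. They no longer appear in
 * the v6.9.4 listing further down.
 */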
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	lockdep_hardirqs_off(caller_addr);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point)	trace_##point
#else
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif
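
/*
 * Illustrative expansion (not from the upstream file): with the macro
 * above, a call such as
 *
 *	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
 *
 * expands to
 *
 *	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
 *
 * on CONFIG_ARCH_WANTS_NO_INSTR architectures, and to
 *
 *	if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 *
 * elsewhere. In other words, the open-coded in_nmi()/_rcuidle pattern
 * from the v6.2 listing is now generated by the macro.
 */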

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}

}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif
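
/*
 * Illustrative note (not from the upstream file): in both versions,
 * CREATE_TRACE_POINTS plus <trace/events/preemptirq.h> instantiates the
 * preemptirq:irq_enable/irq_disable and preempt_enable/preempt_disable
 * events, which can be enabled from tracefs, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/events/preemptirq/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * while the tracer_hardirqs_*() and tracer_preempt_*() calls feed the
 * irqsoff/preemptoff latency tracers (expected to be implemented in
 * trace_irqsoff.c) rather than the tracepoints.
 */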