v4.6 (samples/bpf/offwaketime_kern.c)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include <uapi/linux/ptrace.h>
#include <uapi/linux/perf_event.h>
#include <linux/version.h>
#include <linux/sched.h>

#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})

#define MINBLOCK_US	1

struct key_t {
	char waker[TASK_COMM_LEN];
	char target[TASK_COMM_LEN];
	u32 wret;
	u32 tret;
};

struct bpf_map_def SEC("maps") counts = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(struct key_t),
	.value_size = sizeof(u64),
	.max_entries = 10000,
};

struct bpf_map_def SEC("maps") start = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 10000,
};

struct wokeby_t {
	char name[TASK_COMM_LEN];
	u32 ret;
};

struct bpf_map_def SEC("maps") wokeby = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct wokeby_t),
	.max_entries = 10000,
};

struct bpf_map_def SEC("maps") stackmap = {
	.type = BPF_MAP_TYPE_STACK_TRACE,
	.key_size = sizeof(u32),
	.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
	.max_entries = 10000,
};

#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)

SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
	struct wokeby_t woke = {};
	u32 pid;

	pid = _(p->pid);

	bpf_get_current_comm(&woke.name, sizeof(woke.name));
	woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);

	bpf_map_update_elem(&wokeby, &pid, &woke, BPF_ANY);
	return 0;
}

static inline int update_counts(struct pt_regs *ctx, u32 pid, u64 delta)
{
	struct key_t key = {};
	struct wokeby_t *woke;
	u64 zero = 0, *val;

	bpf_get_current_comm(&key.target, sizeof(key.target));
	key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);

	woke = bpf_map_lookup_elem(&wokeby, &pid);
	if (woke) {
		key.wret = woke->ret;
		__builtin_memcpy(&key.waker, woke->name, TASK_COMM_LEN);
		bpf_map_delete_elem(&wokeby, &pid);
	}

	val = bpf_map_lookup_elem(&counts, &key);
	if (!val) {
		bpf_map_update_elem(&counts, &key, &zero, BPF_NOEXIST);
		val = bpf_map_lookup_elem(&counts, &key);
		if (!val)
			return 0;
	}
	(*val) += delta;
	return 0;
}

SEC("kprobe/finish_task_switch")
int oncpu(struct pt_regs *ctx)
{
	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
	u64 delta, ts, *tsp;
	u32 pid;

	/* record previous thread sleep time */
	pid = _(p->pid);
	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);

	/* calculate current thread's delta time */
	pid = bpf_get_current_pid_tgid();
	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		/* missed start or filtered */
		return 0;

	delta = bpf_ktime_get_ns() - *tsp;
	bpf_map_delete_elem(&start, &pid);
	delta = delta / 1000;
	if (delta < MINBLOCK_US)
		return 0;

	return update_counts(ctx, pid, delta);
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
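
The kernel side above only fills maps; a userspace loader still has to open the compiled object, attach the two kprobes (waker on try_to_wake_up and oncpu on finish_task_switch), and keep the counts map around for reading. In the tree that job belongs to the companion samples/bpf/offwaketime_user.c. The snippet below is only a minimal present-day libbpf sketch, not the in-tree loader: it assumes the object is built as offwaketime_kern.o and that the libbpf in use still accepts the legacy SEC("maps") definitions.

/* Hypothetical loader sketch: open the object, attach every program based on
 * its SEC() name, and fetch the "counts" map fd for later dumping.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int counts_fd;

	obj = bpf_object__open_file("offwaketime_kern.o", NULL);
	if (libbpf_get_error(obj))
		return 1;

	if (bpf_object__load(obj))		/* verify and load all programs */
		return 1;

	bpf_object__for_each_program(prog, obj) {
		link = bpf_program__attach(prog);	/* kprobe attach, driven by SEC() */
		if (libbpf_get_error(link))
			return 1;
	}

	counts_fd = bpf_object__find_map_fd_by_name(obj, "counts");
	if (counts_fd < 0)
		return 1;

	printf("attached, counts map fd = %d\n", counts_fd);
	/* sleep here, then walk the counts map (see the dump sketch after the v5.9 listing) */
	return 0;
}
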
v5.9 (samples/bpf/offwaketime_kern.c)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include <uapi/linux/perf_event.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define _(P)                                                                   \
	({                                                                     \
		typeof(P) val;                                                 \
		bpf_probe_read_kernel(&val, sizeof(val), &(P));                \
		val;                                                           \
	})

#define MINBLOCK_US	1

struct key_t {
	char waker[TASK_COMM_LEN];
	char target[TASK_COMM_LEN];
	u32 wret;
	u32 tret;
};

struct bpf_map_def SEC("maps") counts = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(struct key_t),
	.value_size = sizeof(u64),
	.max_entries = 10000,
};

struct bpf_map_def SEC("maps") start = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 10000,
};

struct wokeby_t {
	char name[TASK_COMM_LEN];
	u32 ret;
};

struct bpf_map_def SEC("maps") wokeby = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(u32),
	.value_size = sizeof(struct wokeby_t),
	.max_entries = 10000,
};

struct bpf_map_def SEC("maps") stackmap = {
	.type = BPF_MAP_TYPE_STACK_TRACE,
	.key_size = sizeof(u32),
	.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
	.max_entries = 10000,
};

#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)

SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
	struct wokeby_t woke;
	u32 pid;

	pid = _(p->pid);

	bpf_get_current_comm(&woke.name, sizeof(woke.name));
	woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);

	bpf_map_update_elem(&wokeby, &pid, &woke, BPF_ANY);
	return 0;
}

static inline int update_counts(void *ctx, u32 pid, u64 delta)
{
	struct wokeby_t *woke;
	u64 zero = 0, *val;
	struct key_t key;

	__builtin_memset(&key.waker, 0, sizeof(key.waker));
	bpf_get_current_comm(&key.target, sizeof(key.target));
	key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
	key.wret = 0;

	woke = bpf_map_lookup_elem(&wokeby, &pid);
	if (woke) {
		key.wret = woke->ret;
		__builtin_memcpy(&key.waker, woke->name, sizeof(key.waker));
		bpf_map_delete_elem(&wokeby, &pid);
	}

	val = bpf_map_lookup_elem(&counts, &key);
	if (!val) {
		bpf_map_update_elem(&counts, &key, &zero, BPF_NOEXIST);
		val = bpf_map_lookup_elem(&counts, &key);
		if (!val)
			return 0;
	}
	(*val) += delta;
	return 0;
}

#if 1
/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
struct sched_switch_args {
	unsigned long long pad;
	char prev_comm[16];
	int prev_pid;
	int prev_prio;
	long long prev_state;
	char next_comm[16];
	int next_pid;
	int next_prio;
};
SEC("tracepoint/sched/sched_switch")
int oncpu(struct sched_switch_args *ctx)
{
	/* record previous thread sleep time */
	u32 pid = ctx->prev_pid;
#else
SEC("kprobe/finish_task_switch")
int oncpu(struct pt_regs *ctx)
{
	struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
	/* record previous thread sleep time */
	u32 pid = _(p->pid);
#endif
	u64 delta, ts, *tsp;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);

	/* calculate current thread's delta time */
	pid = bpf_get_current_pid_tgid();
	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		/* missed start or filtered */
		return 0;

	delta = bpf_ktime_get_ns() - *tsp;
	bpf_map_delete_elem(&start, &pid);
	delta = delta / 1000;
	if (delta < MINBLOCK_US)
		return 0;

	return update_counts(ctx, pid, delta);
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
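
Both versions fill the same counts hash: the key is struct key_t (waker and target comms plus two stack IDs) and the value is the accumulated off-CPU time in microseconds, so a userspace dump loop looks the same for either. The sketch below is illustrative rather than the in-tree sample: dump_counts() and its counts_fd argument are made-up names (the fd would come from a loader like the sketch above), struct key_t is re-declared to mirror the kernel side, and TASK_COMM_LEN is spelled out as 16.

/* Walk every key in the "counts" map and print the waker/target pair, the two
 * stack IDs and the accumulated off-CPU time.
 */
#include <stdio.h>
#include <linux/types.h>
#include <bpf/bpf.h>

#define TASK_COMM_LEN 16		/* matches the kernel's definition */

struct key_t {				/* must mirror the kernel-side key_t */
	char waker[TASK_COMM_LEN];
	char target[TASK_COMM_LEN];
	__u32 wret;
	__u32 tret;
};

static void dump_counts(int counts_fd)
{
	struct key_t key = {}, next_key;
	__u64 delta_us;

	/* a key that is not in the map makes bpf_map_get_next_key() return the first key */
	while (bpf_map_get_next_key(counts_fd, &key, &next_key) == 0) {
		if (bpf_map_lookup_elem(counts_fd, &next_key, &delta_us) == 0)
			printf("%-16s => %-16s wstack=%u tstack=%u %llu us\n",
			       next_key.waker, next_key.target,
			       next_key.wret, next_key.tret,
			       (unsigned long long)delta_us);
		key = next_key;
	}
}

The wret and tret fields are stack IDs returned by bpf_get_stackid(); the corresponding kernel stacks can be recovered by looking those IDs up in the stackmap map, whose values are arrays of PERF_MAX_STACK_DEPTH instruction pointers.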