/* SPDX-License-Identifier: GPL-2.0
 *  Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= 2,
	/* TODO: add entries for all possible errno values */
};

#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
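/* One counter slot per XDP action code (XDP_ABORTED=0 ... XDP_REDIRECT=4),
 * plus the XDP_UNKNOWN slot for out-of-range action values
 */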
struct bpf_map_def SEC("maps") exception_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= XDP_UNKNOWN + 1,
};

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int ifindex;		//	offset:16; size:4; signed:1;
	int err;		//	offset:20; size:4; signed:1;
	int to_ifindex;		//	offset:24; size:4; signed:1;
	u32 map_id;		//	offset:28; size:4; signed:0;
	int map_index;		//	offset:32; size:4; signed:1;
};				//	offset:36

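/* Keys into redirect_err_cnt */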
enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it does not work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog.  Plus, there is additional overhead of doing so.
	 */
}

SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

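/* The SEC("tracepoint/<sys>/<event>") programs above are attached from
 * user space. A minimal sketch with modern libbpf, shown here only as an
 * illustration (this sample's original loader was samples/bpf/bpf_load.c;
 * libbpf >= 1.0 error conventions are assumed, and the legacy SEC("maps")
 * definitions in this file would need converting before libbpf 1.0 could
 * load the object):
 *
 *	struct bpf_object *obj = bpf_object__open_file("xdp_monitor_kern.o", NULL);
 *	struct bpf_program *prog;
 *
 *	if (!obj || bpf_object__load(obj))
 *		return -1;
 *	bpf_object__for_each_program(prog, obj)
 *		if (!bpf_program__attach(prog))	// auto-attach via SEC() name
 *			return -1;
 */
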
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	u64 *cnt;
	u32 key;

	key = ctx->act;
	if (key > XDP_REDIRECT)
		key = XDP_UNKNOWN;

	cnt = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0;
}

/* Common stats data record shared with _user.c
 * (a user-space usage sketch follows at the end of this listing)
 */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
	u64 err;
};
#define MAX_CPUS 64

struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= MAX_CPUS,
};

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Count bulk events, so userspace can calculate the average
	 * bulk size (processed / info)
	 */
	if (ctx->processed > 0)
		rec->info += 1;

	return 0;
}

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int sched;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Count times kthread yielded CPU via schedule call */
	if (ctx->sched)
		rec->info++;

	return 0;
}

struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	u32 map_index;		//	offset:16; size:4; signed:0;
	int drops;		//	offset:20; size:4; signed:1;
	int sent;		//	offset:24; size:4; signed:1;
	int from_ifindex;	//	offset:28; size:4; signed:1;
	int to_ifindex;		//	offset:32; size:4; signed:1;
	int err;		//	offset:36; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->sent;
	rec->dropped   += ctx->drops;

	/* Count bulk events, so userspace can calculate the average
	 * bulk size
	 */
	rec->info += 1;

	/* Record error cases, where no frames were sent */
	if (ctx->err)
		rec->err++;

	/* Catch API misuse: the driver's ndo_xdp_xmit reported more
	 * frames sent than it was asked to send (drops goes negative)
	 */
	if (ctx->drops < 0)
		rec->err++;

	return 1;
}
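
The user-space side of this sample lives in xdp_monitor_user.c. As a rough
illustration of how the BPF_MAP_TYPE_PERCPU_ARRAY maps above are consumed:
a single lookup returns one struct datarec per possible CPU, and the reader
must sum the per-CPU copies itself. The sketch below is a hedged
reconstruction against modern libbpf (link with -lbpf), not the sample's
actual code; obtaining map_fd (e.g. via bpf_object__find_map_fd_by_name())
is left out, and the function name is hypothetical.

#include <stdio.h>
#include <linux/types.h>
#include <bpf/bpf.h>	/* bpf_map_lookup_elem() */
#include <bpf/libbpf.h>	/* libbpf_num_possible_cpus() */

/* Mirrors struct datarec in xdp_monitor_kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
	__u64 err;
};

static int print_cpumap_kthread_cnt(int map_fd)
{
	int nr_cpus = libbpf_num_possible_cpus();
	__u32 key = 0;
	int i;

	if (nr_cpus < 1)
		return -1;

	struct datarec values[nr_cpus];
	struct datarec sum = { 0 };

	/* One syscall fills in a record for every possible CPU */
	if (bpf_map_lookup_elem(map_fd, &key, values) < 0)
		return -1;

	for (i = 0; i < nr_cpus; i++) {
		sum.processed += values[i].processed;
		sum.dropped   += values[i].dropped;
	}
	printf("kthread: processed=%llu dropped=%llu\n",
	       (unsigned long long)sum.processed,
	       (unsigned long long)sum.dropped);
	return 0;
}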