1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM xdp
4
5#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_XDP_H
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/tracepoint.h>
11#include <linux/bpf.h>
12#include <net/xdp.h>
13
/* Map every XDP action (XDP_ABORTED, XDP_DROP, ...) through FN().  Used to
 * generate both the TRACE_DEFINE_ENUM() exports and the __print_symbolic()
 * table, so a new action only needs to be added in one place.
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

/* Export the enum value to user-space trace parsers. */
#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
/* One { value, "name" } entry of the symbolic print table. */
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
/* Full action symbol table, terminated by the { -1, NULL } sentinel. */
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
28
29TRACE_EVENT(xdp_exception,
30
31 TP_PROTO(const struct net_device *dev,
32 const struct bpf_prog *xdp, u32 act),
33
34 TP_ARGS(dev, xdp, act),
35
36 TP_STRUCT__entry(
37 __field(int, prog_id)
38 __field(u32, act)
39 __field(int, ifindex)
40 ),
41
42 TP_fast_assign(
43 __entry->prog_id = xdp->aux->id;
44 __entry->act = act;
45 __entry->ifindex = dev->ifindex;
46 ),
47
48 TP_printk("prog_id=%d action=%s ifindex=%d",
49 __entry->prog_id,
50 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
51 __entry->ifindex)
52);
53
54TRACE_EVENT(xdp_bulk_tx,
55
56 TP_PROTO(const struct net_device *dev,
57 int sent, int drops, int err),
58
59 TP_ARGS(dev, sent, drops, err),
60
61 TP_STRUCT__entry(
62 __field(int, ifindex)
63 __field(u32, act)
64 __field(int, drops)
65 __field(int, sent)
66 __field(int, err)
67 ),
68
69 TP_fast_assign(
70 __entry->ifindex = dev->ifindex;
71 __entry->act = XDP_TX;
72 __entry->drops = drops;
73 __entry->sent = sent;
74 __entry->err = err;
75 ),
76
77 TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
78 __entry->ifindex,
79 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
80 __entry->sent, __entry->drops, __entry->err)
81);
82
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/* Mirrors the leading layout of struct bpf_dtab_netdev (kernel/bpf/devmap.c)
 * so the tracepoint can fetch the target ifindex without including devmap
 * internals — assumes the net_device pointer stays the first member.
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */
89
90DECLARE_EVENT_CLASS(xdp_redirect_template,
91
92 TP_PROTO(const struct net_device *dev,
93 const struct bpf_prog *xdp,
94 const void *tgt, int err,
95 enum bpf_map_type map_type,
96 u32 map_id, u32 index),
97
98 TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
99
100 TP_STRUCT__entry(
101 __field(int, prog_id)
102 __field(u32, act)
103 __field(int, ifindex)
104 __field(int, err)
105 __field(int, to_ifindex)
106 __field(u32, map_id)
107 __field(int, map_index)
108 ),
109
110 TP_fast_assign(
111 u32 ifindex = 0, map_index = index;
112
113 if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
114 /* Just leave to_ifindex to 0 if do broadcast redirect,
115 * as tgt will be NULL.
116 */
117 if (tgt)
118 ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
119 } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
120 ifindex = index;
121 map_index = 0;
122 }
123
124 __entry->prog_id = xdp->aux->id;
125 __entry->act = XDP_REDIRECT;
126 __entry->ifindex = dev->ifindex;
127 __entry->err = err;
128 __entry->to_ifindex = ifindex;
129 __entry->map_id = map_id;
130 __entry->map_index = map_index;
131 ),
132
133 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
134 " map_id=%d map_index=%d",
135 __entry->prog_id,
136 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
137 __entry->ifindex, __entry->to_ifindex,
138 __entry->err, __entry->map_id, __entry->map_index)
139);
140
141DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
142 TP_PROTO(const struct net_device *dev,
143 const struct bpf_prog *xdp,
144 const void *tgt, int err,
145 enum bpf_map_type map_type,
146 u32 map_id, u32 index),
147 TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
148);
149
150DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
151 TP_PROTO(const struct net_device *dev,
152 const struct bpf_prog *xdp,
153 const void *tgt, int err,
154 enum bpf_map_type map_type,
155 u32 map_id, u32 index),
156 TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
157);
158
/* Convenience wrappers.  A non-map redirect is encoded as
 * BPF_MAP_TYPE_UNSPEC + map_id == INT_MAX, with the destination ifindex
 * passed through the index argument (decoded in xdp_redirect_template).
 */
#define _trace_xdp_redirect(dev, xdp, to)				\
	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err)			\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index)	\
	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
170
171/* not used anymore, but kept around so as not to break old programs */
172DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
173 TP_PROTO(const struct net_device *dev,
174 const struct bpf_prog *xdp,
175 const void *tgt, int err,
176 enum bpf_map_type map_type,
177 u32 map_id, u32 index),
178 TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
179);
180
181DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
182 TP_PROTO(const struct net_device *dev,
183 const struct bpf_prog *xdp,
184 const void *tgt, int err,
185 enum bpf_map_type map_type,
186 u32 map_id, u32 index),
187 TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
188);
189
190TRACE_EVENT(xdp_cpumap_kthread,
191
192 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
193 int sched, struct xdp_cpumap_stats *xdp_stats),
194
195 TP_ARGS(map_id, processed, drops, sched, xdp_stats),
196
197 TP_STRUCT__entry(
198 __field(int, map_id)
199 __field(u32, act)
200 __field(int, cpu)
201 __field(unsigned int, drops)
202 __field(unsigned int, processed)
203 __field(int, sched)
204 __field(unsigned int, xdp_pass)
205 __field(unsigned int, xdp_drop)
206 __field(unsigned int, xdp_redirect)
207 ),
208
209 TP_fast_assign(
210 __entry->map_id = map_id;
211 __entry->act = XDP_REDIRECT;
212 __entry->cpu = smp_processor_id();
213 __entry->drops = drops;
214 __entry->processed = processed;
215 __entry->sched = sched;
216 __entry->xdp_pass = xdp_stats->pass;
217 __entry->xdp_drop = xdp_stats->drop;
218 __entry->xdp_redirect = xdp_stats->redirect;
219 ),
220
221 TP_printk("kthread"
222 " cpu=%d map_id=%d action=%s"
223 " processed=%u drops=%u"
224 " sched=%d"
225 " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
226 __entry->cpu, __entry->map_id,
227 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
228 __entry->processed, __entry->drops,
229 __entry->sched,
230 __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
231);
232
233TRACE_EVENT(xdp_cpumap_enqueue,
234
235 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
236 int to_cpu),
237
238 TP_ARGS(map_id, processed, drops, to_cpu),
239
240 TP_STRUCT__entry(
241 __field(int, map_id)
242 __field(u32, act)
243 __field(int, cpu)
244 __field(unsigned int, drops)
245 __field(unsigned int, processed)
246 __field(int, to_cpu)
247 ),
248
249 TP_fast_assign(
250 __entry->map_id = map_id;
251 __entry->act = XDP_REDIRECT;
252 __entry->cpu = smp_processor_id();
253 __entry->drops = drops;
254 __entry->processed = processed;
255 __entry->to_cpu = to_cpu;
256 ),
257
258 TP_printk("enqueue"
259 " cpu=%d map_id=%d action=%s"
260 " processed=%u drops=%u"
261 " to_cpu=%d",
262 __entry->cpu, __entry->map_id,
263 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
264 __entry->processed, __entry->drops,
265 __entry->to_cpu)
266);
267
268TRACE_EVENT(xdp_devmap_xmit,
269
270 TP_PROTO(const struct net_device *from_dev,
271 const struct net_device *to_dev,
272 int sent, int drops, int err),
273
274 TP_ARGS(from_dev, to_dev, sent, drops, err),
275
276 TP_STRUCT__entry(
277 __field(int, from_ifindex)
278 __field(u32, act)
279 __field(int, to_ifindex)
280 __field(int, drops)
281 __field(int, sent)
282 __field(int, err)
283 ),
284
285 TP_fast_assign(
286 __entry->from_ifindex = from_dev->ifindex;
287 __entry->act = XDP_REDIRECT;
288 __entry->to_ifindex = to_dev->ifindex;
289 __entry->drops = drops;
290 __entry->sent = sent;
291 __entry->err = err;
292 ),
293
294 TP_printk("ndo_xdp_xmit"
295 " from_ifindex=%d to_ifindex=%d action=%s"
296 " sent=%d drops=%d"
297 " err=%d",
298 __entry->from_ifindex, __entry->to_ifindex,
299 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
300 __entry->sent, __entry->drops,
301 __entry->err)
302);
303
304/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
305#include <net/xdp_priv.h>
306
/* Same FN-map trick as __XDP_ACT_MAP, for the xdp_mem_type values. */
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
320
321TRACE_EVENT(mem_disconnect,
322
323 TP_PROTO(const struct xdp_mem_allocator *xa),
324
325 TP_ARGS(xa),
326
327 TP_STRUCT__entry(
328 __field(const struct xdp_mem_allocator *, xa)
329 __field(u32, mem_id)
330 __field(u32, mem_type)
331 __field(const void *, allocator)
332 ),
333
334 TP_fast_assign(
335 __entry->xa = xa;
336 __entry->mem_id = xa->mem.id;
337 __entry->mem_type = xa->mem.type;
338 __entry->allocator = xa->allocator;
339 ),
340
341 TP_printk("mem_id=%d mem_type=%s allocator=%p",
342 __entry->mem_id,
343 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
344 __entry->allocator
345 )
346);
347
348TRACE_EVENT(mem_connect,
349
350 TP_PROTO(const struct xdp_mem_allocator *xa,
351 const struct xdp_rxq_info *rxq),
352
353 TP_ARGS(xa, rxq),
354
355 TP_STRUCT__entry(
356 __field(const struct xdp_mem_allocator *, xa)
357 __field(u32, mem_id)
358 __field(u32, mem_type)
359 __field(const void *, allocator)
360 __field(const struct xdp_rxq_info *, rxq)
361 __field(int, ifindex)
362 ),
363
364 TP_fast_assign(
365 __entry->xa = xa;
366 __entry->mem_id = xa->mem.id;
367 __entry->mem_type = xa->mem.type;
368 __entry->allocator = xa->allocator;
369 __entry->rxq = rxq;
370 __entry->ifindex = rxq->dev->ifindex;
371 ),
372
373 TP_printk("mem_id=%d mem_type=%s allocator=%p"
374 " ifindex=%d",
375 __entry->mem_id,
376 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
377 __entry->allocator,
378 __entry->ifindex
379 )
380);
381
382TRACE_EVENT(mem_return_failed,
383
384 TP_PROTO(const struct xdp_mem_info *mem,
385 const struct page *page),
386
387 TP_ARGS(mem, page),
388
389 TP_STRUCT__entry(
390 __field(const struct page *, page)
391 __field(u32, mem_id)
392 __field(u32, mem_type)
393 ),
394
395 TP_fast_assign(
396 __entry->page = page;
397 __entry->mem_id = mem->id;
398 __entry->mem_type = mem->type;
399 ),
400
401 TP_printk("mem_id=%d mem_type=%s page=%p",
402 __entry->mem_id,
403 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
404 __entry->page
405 )
406);
407
408TRACE_EVENT(bpf_xdp_link_attach_failed,
409
410 TP_PROTO(const char *msg),
411
412 TP_ARGS(msg),
413
414 TP_STRUCT__entry(
415 __string(msg, msg)
416 ),
417
418 TP_fast_assign(
419 __assign_str(msg);
420 ),
421
422 TP_printk("errmsg=%s", __get_str(msg))
423);
424
425#endif /* _TRACE_XDP_H */
426
427#include <trace/define_trace.h>
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM xdp
4
5#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_XDP_H
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/tracepoint.h>
11#include <linux/bpf.h>
12
/* NOTE(review): everything from the duplicated SPDX header above down to the
 * end of the file looks like an OLDER revision of this same header that was
 * concatenated into the file (scrape/merge artifact) — confirm and drop one
 * copy.  The _TRACE_XDP_H guard normally keeps this duplicate from being
 * compiled, but under TRACE_HEADER_MULTI_READ these would redefine the
 * macros and events above.
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
27
28TRACE_EVENT(xdp_exception,
29
30 TP_PROTO(const struct net_device *dev,
31 const struct bpf_prog *xdp, u32 act),
32
33 TP_ARGS(dev, xdp, act),
34
35 TP_STRUCT__entry(
36 __field(int, prog_id)
37 __field(u32, act)
38 __field(int, ifindex)
39 ),
40
41 TP_fast_assign(
42 __entry->prog_id = xdp->aux->id;
43 __entry->act = act;
44 __entry->ifindex = dev->ifindex;
45 ),
46
47 TP_printk("prog_id=%d action=%s ifindex=%d",
48 __entry->prog_id,
49 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
50 __entry->ifindex)
51);
52
53TRACE_EVENT(xdp_bulk_tx,
54
55 TP_PROTO(const struct net_device *dev,
56 int sent, int drops, int err),
57
58 TP_ARGS(dev, sent, drops, err),
59
60 TP_STRUCT__entry(
61 __field(int, ifindex)
62 __field(u32, act)
63 __field(int, drops)
64 __field(int, sent)
65 __field(int, err)
66 ),
67
68 TP_fast_assign(
69 __entry->ifindex = dev->ifindex;
70 __entry->act = XDP_TX;
71 __entry->drops = drops;
72 __entry->sent = sent;
73 __entry->err = err;
74 ),
75
76 TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
77 __entry->ifindex,
78 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79 __entry->sent, __entry->drops, __entry->err)
80);
81
82DECLARE_EVENT_CLASS(xdp_redirect_template,
83
84 TP_PROTO(const struct net_device *dev,
85 const struct bpf_prog *xdp,
86 int to_ifindex, int err,
87 const struct bpf_map *map, u32 map_index),
88
89 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
90
91 TP_STRUCT__entry(
92 __field(int, prog_id)
93 __field(u32, act)
94 __field(int, ifindex)
95 __field(int, err)
96 __field(int, to_ifindex)
97 __field(u32, map_id)
98 __field(int, map_index)
99 ),
100
101 TP_fast_assign(
102 __entry->prog_id = xdp->aux->id;
103 __entry->act = XDP_REDIRECT;
104 __entry->ifindex = dev->ifindex;
105 __entry->err = err;
106 __entry->to_ifindex = to_ifindex;
107 __entry->map_id = map ? map->id : 0;
108 __entry->map_index = map_index;
109 ),
110
111 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
112 __entry->prog_id,
113 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
114 __entry->ifindex, __entry->to_ifindex,
115 __entry->err)
116);
117
118DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
119 TP_PROTO(const struct net_device *dev,
120 const struct bpf_prog *xdp,
121 int to_ifindex, int err,
122 const struct bpf_map *map, u32 map_index),
123 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
124);
125
126DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
127 TP_PROTO(const struct net_device *dev,
128 const struct bpf_prog *xdp,
129 int to_ifindex, int err,
130 const struct bpf_map *map, u32 map_index),
131 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
132);
133
/* Non-map redirect wrappers (older duplicate copy).
 * NOTE(review): the trailing semicolons inside these macro bodies match the
 * old upstream text but would break use in an expression context — verify
 * against the kernel revision this copy came from.
 */
#define _trace_xdp_redirect(dev, xdp, to)		\
	 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);

#define _trace_xdp_redirect_err(dev, xdp, to, err)	\
	 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
139
140DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
141 TP_PROTO(const struct net_device *dev,
142 const struct bpf_prog *xdp,
143 int to_ifindex, int err,
144 const struct bpf_map *map, u32 map_index),
145 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
146 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
147 " map_id=%d map_index=%d",
148 __entry->prog_id,
149 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
150 __entry->ifindex, __entry->to_ifindex,
151 __entry->err,
152 __entry->map_id, __entry->map_index)
153);
154
155DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
156 TP_PROTO(const struct net_device *dev,
157 const struct bpf_prog *xdp,
158 int to_ifindex, int err,
159 const struct bpf_map *map, u32 map_index),
160 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
161 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
162 " map_id=%d map_index=%d",
163 __entry->prog_id,
164 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
165 __entry->ifindex, __entry->to_ifindex,
166 __entry->err,
167 __entry->map_id, __entry->map_index)
168);
169
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/* Mirror of struct bpf_dtab_netdev's leading net_device pointer
 * (older duplicate copy).
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */
176
/* Resolve the destination ifindex for devmap targets; non-devmap targets
 * (cpumap, xskmap) report 0 (older duplicate copy).
 */
#define devmap_ifindex(fwd, map)				\
	((map->map_type == BPF_MAP_TYPE_DEVMAP ||		\
	  map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ?		\
	  ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)

#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)		\
	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
				0, map, idx)

#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err)	\
	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map),	\
				    err, map, idx)
189
190TRACE_EVENT(xdp_cpumap_kthread,
191
192 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
193 int sched),
194
195 TP_ARGS(map_id, processed, drops, sched),
196
197 TP_STRUCT__entry(
198 __field(int, map_id)
199 __field(u32, act)
200 __field(int, cpu)
201 __field(unsigned int, drops)
202 __field(unsigned int, processed)
203 __field(int, sched)
204 ),
205
206 TP_fast_assign(
207 __entry->map_id = map_id;
208 __entry->act = XDP_REDIRECT;
209 __entry->cpu = smp_processor_id();
210 __entry->drops = drops;
211 __entry->processed = processed;
212 __entry->sched = sched;
213 ),
214
215 TP_printk("kthread"
216 " cpu=%d map_id=%d action=%s"
217 " processed=%u drops=%u"
218 " sched=%d",
219 __entry->cpu, __entry->map_id,
220 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
221 __entry->processed, __entry->drops,
222 __entry->sched)
223);
224
225TRACE_EVENT(xdp_cpumap_enqueue,
226
227 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
228 int to_cpu),
229
230 TP_ARGS(map_id, processed, drops, to_cpu),
231
232 TP_STRUCT__entry(
233 __field(int, map_id)
234 __field(u32, act)
235 __field(int, cpu)
236 __field(unsigned int, drops)
237 __field(unsigned int, processed)
238 __field(int, to_cpu)
239 ),
240
241 TP_fast_assign(
242 __entry->map_id = map_id;
243 __entry->act = XDP_REDIRECT;
244 __entry->cpu = smp_processor_id();
245 __entry->drops = drops;
246 __entry->processed = processed;
247 __entry->to_cpu = to_cpu;
248 ),
249
250 TP_printk("enqueue"
251 " cpu=%d map_id=%d action=%s"
252 " processed=%u drops=%u"
253 " to_cpu=%d",
254 __entry->cpu, __entry->map_id,
255 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
256 __entry->processed, __entry->drops,
257 __entry->to_cpu)
258);
259
260TRACE_EVENT(xdp_devmap_xmit,
261
262 TP_PROTO(const struct bpf_map *map, u32 map_index,
263 int sent, int drops,
264 const struct net_device *from_dev,
265 const struct net_device *to_dev, int err),
266
267 TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
268
269 TP_STRUCT__entry(
270 __field(int, map_id)
271 __field(u32, act)
272 __field(u32, map_index)
273 __field(int, drops)
274 __field(int, sent)
275 __field(int, from_ifindex)
276 __field(int, to_ifindex)
277 __field(int, err)
278 ),
279
280 TP_fast_assign(
281 __entry->map_id = map->id;
282 __entry->act = XDP_REDIRECT;
283 __entry->map_index = map_index;
284 __entry->drops = drops;
285 __entry->sent = sent;
286 __entry->from_ifindex = from_dev->ifindex;
287 __entry->to_ifindex = to_dev->ifindex;
288 __entry->err = err;
289 ),
290
291 TP_printk("ndo_xdp_xmit"
292 " map_id=%d map_index=%d action=%s"
293 " sent=%d drops=%d"
294 " from_ifindex=%d to_ifindex=%d err=%d",
295 __entry->map_id, __entry->map_index,
296 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
297 __entry->sent, __entry->drops,
298 __entry->from_ifindex, __entry->to_ifindex, __entry->err)
299);
300
301/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
302#include <net/xdp_priv.h>
303
/* Older revision of the mem-type table — still lists ZERO_COPY, which the
 * newer copy earlier in the file replaced with XSK_BUFF_POOL (duplicate
 * copy).
 */
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(ZERO_COPY)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
317
318TRACE_EVENT(mem_disconnect,
319
320 TP_PROTO(const struct xdp_mem_allocator *xa,
321 bool safe_to_remove, bool force),
322
323 TP_ARGS(xa, safe_to_remove, force),
324
325 TP_STRUCT__entry(
326 __field(const struct xdp_mem_allocator *, xa)
327 __field(u32, mem_id)
328 __field(u32, mem_type)
329 __field(const void *, allocator)
330 __field(bool, safe_to_remove)
331 __field(bool, force)
332 __field(int, disconnect_cnt)
333 ),
334
335 TP_fast_assign(
336 __entry->xa = xa;
337 __entry->mem_id = xa->mem.id;
338 __entry->mem_type = xa->mem.type;
339 __entry->allocator = xa->allocator;
340 __entry->safe_to_remove = safe_to_remove;
341 __entry->force = force;
342 __entry->disconnect_cnt = xa->disconnect_cnt;
343 ),
344
345 TP_printk("mem_id=%d mem_type=%s allocator=%p"
346 " safe_to_remove=%s force=%s disconnect_cnt=%d",
347 __entry->mem_id,
348 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
349 __entry->allocator,
350 __entry->safe_to_remove ? "true" : "false",
351 __entry->force ? "true" : "false",
352 __entry->disconnect_cnt
353 )
354);
355
356TRACE_EVENT(mem_connect,
357
358 TP_PROTO(const struct xdp_mem_allocator *xa,
359 const struct xdp_rxq_info *rxq),
360
361 TP_ARGS(xa, rxq),
362
363 TP_STRUCT__entry(
364 __field(const struct xdp_mem_allocator *, xa)
365 __field(u32, mem_id)
366 __field(u32, mem_type)
367 __field(const void *, allocator)
368 __field(const struct xdp_rxq_info *, rxq)
369 __field(int, ifindex)
370 ),
371
372 TP_fast_assign(
373 __entry->xa = xa;
374 __entry->mem_id = xa->mem.id;
375 __entry->mem_type = xa->mem.type;
376 __entry->allocator = xa->allocator;
377 __entry->rxq = rxq;
378 __entry->ifindex = rxq->dev->ifindex;
379 ),
380
381 TP_printk("mem_id=%d mem_type=%s allocator=%p"
382 " ifindex=%d",
383 __entry->mem_id,
384 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
385 __entry->allocator,
386 __entry->ifindex
387 )
388);
389
390TRACE_EVENT(mem_return_failed,
391
392 TP_PROTO(const struct xdp_mem_info *mem,
393 const struct page *page),
394
395 TP_ARGS(mem, page),
396
397 TP_STRUCT__entry(
398 __field(const struct page *, page)
399 __field(u32, mem_id)
400 __field(u32, mem_type)
401 ),
402
403 TP_fast_assign(
404 __entry->page = page;
405 __entry->mem_id = mem->id;
406 __entry->mem_type = mem->type;
407 ),
408
409 TP_printk("mem_id=%d mem_type=%s page=%p",
410 __entry->mem_id,
411 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
412 __entry->page
413 )
414);
415
416#endif /* _TRACE_XDP_H */
417
418#include <trace/define_trace.h>