Linux v5.14.15: include/trace/events/xdp.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM xdp

#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XDP_H

#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>

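/*
 * __XDP_ACT_MAP() is an "X-macro": it is expanded once with
 * __XDP_ACT_TP_FN to emit a TRACE_DEFINE_ENUM() per XDP action, so the
 * numeric enum values are exported to user space, and once with
 * __XDP_ACT_SYM_FN to build the value/name table that TP_printk() feeds
 * to __print_symbolic().
 */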
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)

TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->prog_id	= xdp->aux->id;
		__entry->act		= act;
		__entry->ifindex	= dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);
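
/*
 * Hedged usage sketch (not part of this header): a driver's XDP receive
 * path would typically report an unhandled or unexpected verdict like
 * this, where 'prog', 'act' and 'netdev' are illustrative locals:
 *
 *	u32 act = bpf_prog_run_xdp(prog, &xdp);
 *
 *	switch (act) {
 *	case XDP_PASS:
 *		...
 *	default:
 *		trace_xdp_exception(netdev, prog, act);
 *		break;
 *	}
 */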

TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)
		__field(u32, act)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->ifindex	= dev->ifindex;
		__entry->act		= XDP_TX;
		__entry->drops		= drops;
		__entry->sent		= sent;
		__entry->err		= err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);

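/*
 * Minimal mirror of struct bpf_dtab_netdev (kernel/bpf/devmap.c), which
 * keeps its net_device pointer as the first member precisely so the
 * redirect tracepoints can recover ->dev->ifindex from the opaque 'tgt'
 * pointer without pulling in devmap internals.
 */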
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),

	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, err)
		__field(int, to_ifindex)
		__field(u32, map_id)
		__field(int, map_index)
	),

	TP_fast_assign(
		u32 ifindex = 0, map_index = index;

		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
			/* Leave to_ifindex at 0 for broadcast redirects,
			 * as tgt will be NULL in that case.
			 */
			if (tgt)
				ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
			ifindex = index;
			map_index = 0;
		}

		__entry->prog_id	= xdp->aux->id;
		__entry->act		= XDP_REDIRECT;
		__entry->ifindex	= dev->ifindex;
		__entry->err		= err;
		__entry->to_ifindex	= ifindex;
		__entry->map_id		= map_id;
		__entry->map_index	= map_index;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err, __entry->map_id, __entry->map_index)
);
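
/*
 * The two events below instantiate the template with identical
 * signatures: xdp_redirect fires on the success path and xdp_redirect_err
 * on the failure path (with a non-zero err), so they can be enabled
 * independently.
 */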

DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

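/*
 * Convenience wrappers for the non-map bpf_redirect() case. The
 * BPF_MAP_TYPE_UNSPEC / INT_MAX pair is the sentinel that TP_fast_assign()
 * in the template above decodes back into a plain target ifindex. A
 * hedged sketch of a call site (net/core/filter.c uses these from the
 * xdp_do_redirect() paths):
 *
 *	_trace_xdp_redirect(dev, xdp_prog, ifindex);
 *	_trace_xdp_redirect_err(dev, xdp_prog, ifindex, err);
 */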
#define _trace_xdp_redirect(dev, xdp, to)						\
	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err)					\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index) \
	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)

/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 enum bpf_map_type map_type,
		 u32 map_id, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);

TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched, struct xdp_cpumap_stats *xdp_stats),

	TP_ARGS(map_id, processed, drops, sched, xdp_stats),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, sched)
		__field(unsigned int, xdp_pass)
		__field(unsigned int, xdp_drop)
		__field(unsigned int, xdp_redirect)
	),

	TP_fast_assign(
		__entry->map_id		= map_id;
		__entry->act		= XDP_REDIRECT;
		__entry->cpu		= smp_processor_id();
		__entry->drops		= drops;
		__entry->processed	= processed;
		__entry->sched		= sched;
		__entry->xdp_pass	= xdp_stats->pass;
		__entry->xdp_drop	= xdp_stats->drop;
		__entry->xdp_redirect	= xdp_stats->redirect;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d"
		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched,
		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);
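
/*
 * act is hardcoded to XDP_REDIRECT above because packets only reach a
 * cpumap via redirect. Hedged sketch of the presumed caller, the per-CPU
 * kthread in kernel/bpf/cpumap.c, reporting one event per processed batch
 * ('n_frames' and 'stats' are illustrative locals):
 *
 *	trace_xdp_cpumap_kthread(map_id, n_frames, drops, sched, &stats);
 */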

TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, to_cpu)
	),

	TP_fast_assign(
		__entry->map_id		= map_id;
		__entry->act		= XDP_REDIRECT;
		__entry->cpu		= smp_processor_id();
		__entry->drops		= drops;
		__entry->processed	= processed;
		__entry->to_cpu		= to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);

TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct net_device *from_dev,
		 const struct net_device *to_dev,
		 int sent, int drops, int err),

	TP_ARGS(from_dev, to_dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, from_ifindex)
		__field(u32, act)
		__field(int, to_ifindex)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->from_ifindex	= from_dev->ifindex;
		__entry->act		= XDP_REDIRECT;
		__entry->to_ifindex	= to_dev->ifindex;
		__entry->drops		= drops;
		__entry->sent		= sent;
		__entry->err		= err;
	),

	TP_printk("ndo_xdp_xmit"
		  " from_ifindex=%d to_ifindex=%d action=%s"
		  " sent=%d drops=%d"
		  " err=%d",
		  __entry->from_ifindex, __entry->to_ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->err)
);
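
/*
 * Hedged sketch: devmap's bulk-flush path reports one xdp_devmap_xmit
 * event per ndo_xdp_xmit() batch, deriving 'sent' and 'drops' from the
 * driver's return value:
 *
 *	trace_xdp_devmap_xmit(from_dev, to_dev, sent, drops, err);
 */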

/* Users of this header are expected to have included <net/xdp.h> themselves, but not xdp_priv.h. */
#include <net/xdp_priv.h>

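/*
 * Same X-macro pattern as __XDP_ACT_MAP above, this time for the
 * enum xdp_mem_type values from <net/xdp.h>.
 */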
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(XSK_BUFF_POOL)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, NULL }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)

TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa),

	TP_ARGS(xa),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *,	xa)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
		__field(const void *,	allocator)
	),

	TP_fast_assign(
		__entry->xa		= xa;
		__entry->mem_id		= xa->mem.id;
		__entry->mem_type	= xa->mem.type;
		__entry->allocator	= xa->allocator;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator
	)
);

TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *,	xa)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
		__field(const void *,	allocator)
		__field(const struct xdp_rxq_info *,		rxq)
		__field(int,		ifindex)
	),

	TP_fast_assign(
		__entry->xa		= xa;
		__entry->mem_id		= xa->mem.id;
		__entry->mem_type	= xa->mem.type;
		__entry->allocator	= xa->allocator;
		__entry->rxq		= rxq;
		__entry->ifindex	= rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);

TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *,	page)
		__field(u32,		mem_id)
		__field(u32,		mem_type)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->mem_id		= mem->id;
		__entry->mem_type	= mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);

#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>