Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.13.7.
  1/* SPDX-License-Identifier: GPL-2.0
  2 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
  3 */
/* Documentation string printed by usage() and as startup header */
static const char *__doc__=
 "XDP monitor tool, based on tracepoints\n"
;

/* Extra notice printed when running in the default errors-only mode */
static const char *__doc_err_only__=
 " NOTICE: Only tracking XDP redirect errors\n"
 "         Enable TX success stats via '--stats'\n"
 "         (which comes with a per packet processing overhead)\n"
;
 13
 14#include <errno.h>
 15#include <stdio.h>
 16#include <stdlib.h>
 17#include <stdbool.h>
 18#include <stdint.h>
 19#include <string.h>
 20#include <ctype.h>
 21#include <unistd.h>
 22#include <locale.h>
 23
 24#include <sys/resource.h>
 25#include <getopt.h>
 26#include <net/if.h>
 27#include <time.h>
 28
 29#include <bpf/bpf.h>
 30#include "bpf_load.h"
 31#include "bpf_util.h"
 32
/* Runtime knobs, set from command line options in main() */
static int verbose = 1;		/* print doc header and stats map names */
static bool debug = false;	/* dump loaded BPF prog/map/event fd info */

/* Command line options; the short-option char is returned by getopt_long() */
static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"debug",	no_argument,		NULL, 'D' },
	{"stats",	no_argument,		NULL, 'S' },
	{"sec", 	required_argument,	NULL, 's' },
	{0, 0, NULL,  0 }
};
 43
 44/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
 45#define EXIT_FAIL_MEM	5
 46
 47static void usage(char *argv[])
 48{
 49	int i;
 50	printf("\nDOCUMENTATION:\n%s\n", __doc__);
 51	printf("\n");
 52	printf(" Usage: %s (options-see-below)\n",
 53	       argv[0]);
 54	printf(" Listing options:\n");
 55	for (i = 0; long_options[i].name != 0; i++) {
 56		printf(" --%-15s", long_options[i].name);
 57		if (long_options[i].flag != NULL)
 58			printf(" flag (internal value:%d)",
 59			       *long_options[i].flag);
 60		else
 61			printf("short-option: -%c",
 62			       long_options[i].val);
 63		printf("\n");
 64	}
 65	printf("\n");
 66}
 67
 68#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
 69static __u64 gettime(void)
 70{
 71	struct timespec t;
 72	int res;
 73
 74	res = clock_gettime(CLOCK_MONOTONIC, &t);
 75	if (res < 0) {
 76		fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
 77		exit(EXIT_FAILURE);
 78	}
 79	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
 80}
 81
 82enum {
 83	REDIR_SUCCESS = 0,
 84	REDIR_ERROR = 1,
 85};
 86#define REDIR_RES_MAX 2
 87static const char *redir_names[REDIR_RES_MAX] = {
 88	[REDIR_SUCCESS]	= "Success",
 89	[REDIR_ERROR]	= "Error",
 90};
 91static const char *err2str(int err)
 92{
 93	if (err < REDIR_RES_MAX)
 94		return redir_names[err];
 95	return NULL;
 96}
 97/* enum xdp_action */
 98#define XDP_UNKNOWN	XDP_REDIRECT + 1
 99#define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
100static const char *xdp_action_names[XDP_ACTION_MAX] = {
101	[XDP_ABORTED]	= "XDP_ABORTED",
102	[XDP_DROP]	= "XDP_DROP",
103	[XDP_PASS]	= "XDP_PASS",
104	[XDP_TX]	= "XDP_TX",
105	[XDP_REDIRECT]	= "XDP_REDIRECT",
106	[XDP_UNKNOWN]	= "XDP_UNKNOWN",
107};
108static const char *action2str(int action)
109{
110	if (action < XDP_ACTION_MAX)
111		return xdp_action_names[action];
112	return NULL;
113}
114
/* Common stats data record shared with _kern.c
 * (layout must match the kernel-side struct exactly)
 */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
	__u64 err;
};
/* NOTE(review): presumably must match MAX_CPUS in _kern.c — verify */
#define MAX_CPUS 64

/* Userspace structs for collection of stats from maps */
struct record {
	__u64 timestamp;	/* gettime() nanosec when the map was read */
	struct datarec total;	/* sum of all per-CPU values */
	struct datarec *cpu;	/* array[nr_cpus], heap allocated */
};
struct u64rec {
	__u64 processed;
};
struct record_u64 {
	/* record for _kern side __u64 values */
	__u64 timestamp;	/* gettime() nanosec when the map was read */
	struct u64rec total;	/* sum of all per-CPU values */
	struct u64rec *cpu;	/* array[nr_cpus], heap allocated */
};

/* One complete sample of every tracepoint map tracked by this tool */
struct stats_record {
	struct record_u64 xdp_redirect[REDIR_RES_MAX];
	struct record_u64 xdp_exception[XDP_ACTION_MAX];
	struct record xdp_cpumap_kthread;
	struct record xdp_cpumap_enqueue[MAX_CPUS];
	struct record xdp_devmap_xmit;
};
147
148static bool map_collect_record(int fd, __u32 key, struct record *rec)
149{
150	/* For percpu maps, userspace gets a value per possible CPU */
151	unsigned int nr_cpus = bpf_num_possible_cpus();
152	struct datarec values[nr_cpus];
153	__u64 sum_processed = 0;
154	__u64 sum_dropped = 0;
155	__u64 sum_info = 0;
156	__u64 sum_err = 0;
157	int i;
158
159	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
160		fprintf(stderr,
161			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
162		return false;
163	}
164	/* Get time as close as possible to reading map contents */
165	rec->timestamp = gettime();
166
167	/* Record and sum values from each CPU */
168	for (i = 0; i < nr_cpus; i++) {
169		rec->cpu[i].processed = values[i].processed;
170		sum_processed        += values[i].processed;
171		rec->cpu[i].dropped = values[i].dropped;
172		sum_dropped        += values[i].dropped;
173		rec->cpu[i].info = values[i].info;
174		sum_info        += values[i].info;
175		rec->cpu[i].err = values[i].err;
176		sum_err        += values[i].err;
177	}
178	rec->total.processed = sum_processed;
179	rec->total.dropped   = sum_dropped;
180	rec->total.info      = sum_info;
181	rec->total.err       = sum_err;
182	return true;
183}
184
185static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
186{
187	/* For percpu maps, userspace gets a value per possible CPU */
188	unsigned int nr_cpus = bpf_num_possible_cpus();
189	struct u64rec values[nr_cpus];
190	__u64 sum_total = 0;
191	int i;
192
193	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
194		fprintf(stderr,
195			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
196		return false;
197	}
198	/* Get time as close as possible to reading map contents */
199	rec->timestamp = gettime();
200
201	/* Record and sum values from each CPU */
202	for (i = 0; i < nr_cpus; i++) {
203		rec->cpu[i].processed = values[i].processed;
204		sum_total            += values[i].processed;
205	}
206	rec->total.processed = sum_total;
207	return true;
208}
209
210static double calc_period(struct record *r, struct record *p)
211{
212	double period_ = 0;
213	__u64 period = 0;
214
215	period = r->timestamp - p->timestamp;
216	if (period > 0)
217		period_ = ((double) period / NANOSEC_PER_SEC);
218
219	return period_;
220}
221
222static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
223{
224	double period_ = 0;
225	__u64 period = 0;
226
227	period = r->timestamp - p->timestamp;
228	if (period > 0)
229		period_ = ((double) period / NANOSEC_PER_SEC);
230
231	return period_;
232}
233
234static double calc_pps(struct datarec *r, struct datarec *p, double period)
235{
236	__u64 packets = 0;
237	double pps = 0;
238
239	if (period > 0) {
240		packets = r->processed - p->processed;
241		pps = packets / period;
242	}
243	return pps;
244}
245
246static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
247{
248	__u64 packets = 0;
249	double pps = 0;
250
251	if (period > 0) {
252		packets = r->processed - p->processed;
253		pps = packets / period;
254	}
255	return pps;
256}
257
258static double calc_drop(struct datarec *r, struct datarec *p, double period)
259{
260	__u64 packets = 0;
261	double pps = 0;
262
263	if (period > 0) {
264		packets = r->dropped - p->dropped;
265		pps = packets / period;
266	}
267	return pps;
268}
269
270static double calc_info(struct datarec *r, struct datarec *p, double period)
271{
272	__u64 packets = 0;
273	double pps = 0;
274
275	if (period > 0) {
276		packets = r->info - p->info;
277		pps = packets / period;
278	}
279	return pps;
280}
281
282static double calc_err(struct datarec *r, struct datarec *p, double period)
283{
284	__u64 packets = 0;
285	double pps = 0;
286
287	if (period > 0) {
288		packets = r->err - p->err;
289		pps = packets / period;
290	}
291	return pps;
292}
293
/* Print one interval of statistics for all tracked tracepoint groups.
 *
 * @stats_rec:  newest sample
 * @stats_prev: previous sample (deltas are computed against this)
 * @err_only:   when true, skip the REDIR_SUCCESS row of xdp_redirect
 *
 * NOTE: the format strings use the %' flag (thousands separators), which
 * depends on the setlocale(LC_NUMERIC, ...) call done in stats_poll().
 */
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			bool err_only)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int rec_i = 0, i, to_cpu;
	double t = 0, pps = 0;

	/* Header */
	printf("%-15s %-7s %-12s %-12s %-9s\n",
	       "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");

	/* tracepoint: xdp:xdp_redirect_* */
	if (err_only)
		rec_i = REDIR_ERROR;

	for (; rec_i < REDIR_RES_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec  =  &stats_rec->xdp_redirect[rec_i];
		prev = &stats_prev->xdp_redirect[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			/* Successes go in the "pps" column, errors in
			 * "drop-pps"; rec_i selects which column
			 */
			if (pps > 0)
				printf(fmt1, "XDP_REDIRECT", i,
				       rec_i ? 0.0: pps, rec_i ? pps : 0.0,
				       err2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		printf(fmt2, "XDP_REDIRECT", "total",
		       rec_i ? 0.0: pps, rec_i ? pps : 0.0, err2str(rec_i));
	}

	/* tracepoint: xdp:xdp_exception */
	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec  =  &stats_rec->xdp_exception[rec_i];
		prev = &stats_prev->xdp_exception[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			if (pps > 0)
				printf(fmt1, "Exception", i,
				       0.0, pps, action2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		if (pps > 0)
			printf(fmt2, "Exception", "total",
			       0.0, pps, action2str(rec_i));
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
		char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		struct record *rec, *prev;
		char *info_str = "";
		double drop, info;

		rec  =  &stats_rec->xdp_cpumap_enqueue[to_cpu];
		prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt1, "cpumap-enqueue",
				       i, to_cpu, pps, drop, info, info_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop(&rec->total, &prev->total, t);
			info = calc_info(&rec->total, &prev->total, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			printf(fmt2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, info, info_str);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
		struct record *rec, *prev;
		double drop, info;
		char *i_str = "";

		rec  =  &stats_rec->xdp_cpumap_kthread;
		prev = &stats_prev->xdp_cpumap_kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			/* "info" counts kthread schedule events here */
			if (info > 0)
				i_str = "sched";
			if (pps > 0 || drop > 0)
				printf(fmt1, "cpumap-kthread",
				       i, pps, drop, info, i_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		if (info > 0)
			i_str = "sched-sum";
		printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
	}

	/* devmap ndo_xdp_xmit stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		struct record *rec, *prev;
		double drop, info, err;
		char *i_str = "";
		char *err_str = "";

		rec  =  &stats_rec->xdp_devmap_xmit;
		prev = &stats_prev->xdp_devmap_xmit;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			err  = calc_err(r, p, t);
			if (info > 0) {
				i_str = "bulk-average";
				info = (pps+drop) / info; /* calc avg bulk */
			}
			if (err > 0)
				err_str = "drv-err";
			if (pps > 0 || drop > 0)
				printf(fmt1, "devmap-xmit",
				       i, pps, drop, info, i_str, err_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		err  = calc_err(&rec->total, &prev->total, t);
		if (info > 0) {
			i_str = "bulk-average";
			info = (pps+drop) / info; /* calc avg bulk */
		}
		if (err > 0)
			err_str = "drv-err";
		printf(fmt2, "devmap-xmit", "total", pps, drop,
		       info, i_str, err_str);
	}

	printf("\n");
}
476
477static bool stats_collect(struct stats_record *rec)
478{
479	int fd;
480	int i;
481
482	/* TODO: Detect if someone unloaded the perf event_fd's, as
483	 * this can happen by someone running perf-record -e
484	 */
485
486	fd = map_data[0].fd; /* map0: redirect_err_cnt */
487	for (i = 0; i < REDIR_RES_MAX; i++)
488		map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
489
490	fd = map_data[1].fd; /* map1: exception_cnt */
491	for (i = 0; i < XDP_ACTION_MAX; i++) {
492		map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
493	}
494
495	fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
496	for (i = 0; i < MAX_CPUS; i++)
497		map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
498
499	fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
500	map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
501
502	fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
503	map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
504
505	return true;
506}
507
508static void *alloc_rec_per_cpu(int record_size)
509{
510	unsigned int nr_cpus = bpf_num_possible_cpus();
511	void *array;
512
513	array = calloc(nr_cpus, record_size);
514	if (!array) {
515		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
516		exit(EXIT_FAIL_MEM);
517	}
518	return array;
519}
520
521static struct stats_record *alloc_stats_record(void)
522{
523	struct stats_record *rec;
524	int rec_sz;
525	int i;
526
527	/* Alloc main stats_record structure */
528	rec = calloc(1, sizeof(*rec));
529	if (!rec) {
530		fprintf(stderr, "Mem alloc error\n");
531		exit(EXIT_FAIL_MEM);
532	}
533
534	/* Alloc stats stored per CPU for each record */
535	rec_sz = sizeof(struct u64rec);
536	for (i = 0; i < REDIR_RES_MAX; i++)
537		rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
538
539	for (i = 0; i < XDP_ACTION_MAX; i++)
540		rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
541
542	rec_sz = sizeof(struct datarec);
543	rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
544	rec->xdp_devmap_xmit.cpu    = alloc_rec_per_cpu(rec_sz);
545
546	for (i = 0; i < MAX_CPUS; i++)
547		rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
548
549	return rec;
550}
551
552static void free_stats_record(struct stats_record *r)
553{
554	int i;
555
556	for (i = 0; i < REDIR_RES_MAX; i++)
557		free(r->xdp_redirect[i].cpu);
558
559	for (i = 0; i < XDP_ACTION_MAX; i++)
560		free(r->xdp_exception[i].cpu);
561
562	free(r->xdp_cpumap_kthread.cpu);
563	free(r->xdp_devmap_xmit.cpu);
564
565	for (i = 0; i < MAX_CPUS; i++)
566		free(r->xdp_cpumap_enqueue[i].cpu);
567
568	free(r);
569}
570
/* Exchange the two stats_record pointers (classic three-step swap) */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *old_a = *a;

	*a = *b;
	*b = old_a;
}
580
/* Main loop: sample all maps every @interval seconds and print deltas.
 *
 * NOTE: the while(1) loop below never exits normally (only via signal or
 * exit()), so the free_stats_record() calls after it are unreachable and
 * kept only as documentation of ownership.
 */
static void stats_poll(int interval, bool err_only)
{
	struct stats_record *rec, *prev;

	rec  = alloc_stats_record();
	prev = alloc_stats_record();
	stats_collect(rec);	/* prime first sample; becomes "prev" below */

	if (err_only)
		printf("\n%s\n", __doc_err_only__);

	/* Trick to pretty printf with thousands separators use %' */
	setlocale(LC_NUMERIC, "en_US");

	/* Header */
	if (verbose)
		printf("\n%s", __doc__);

	/* TODO Need more advanced stats on error types */
	if (verbose) {
		printf(" - Stats map0: %s\n", map_data[0].name);
		printf(" - Stats map1: %s\n", map_data[1].name);
		printf("\n");
	}
	fflush(stdout);

	while (1) {
		swap(&prev, &rec);
		stats_collect(rec);
		stats_print(rec, prev, err_only);
		fflush(stdout);
		sleep(interval);
	}

	free_stats_record(rec);
	free_stats_record(prev);
}
618
619static void print_bpf_prog_info(void)
620{
621	int i;
622
623	/* Prog info */
624	printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
625	for (i = 0; i < prog_cnt; i++) {
626		printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
627	}
628
629	/* Maps info */
630	printf("Loaded BPF prog have %d map(s)\n", map_data_count);
631	for (i = 0; i < map_data_count; i++) {
632		char *name = map_data[i].name;
633		int fd     = map_data[i].fd;
634
635		printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
636	}
637
638	/* Event info */
639	printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
640	for (i = 0; i < prog_cnt; i++) {
641		if (event_fd[i] != -1)
642			printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
643	}
644}
645
/* Entry point: parse options, load the companion _kern.o BPF object,
 * optionally disable success-tracking tracepoints, then poll forever.
 */
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int longindex = 0, opt;
	int ret = EXIT_SUCCESS;
	char bpf_obj_file[256];

	/* Default settings: */
	bool errors_only = true;
	int interval = 2;

	/* BPF object file is derived from our own binary name: <argv0>_kern.o */
	snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);

	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hDSs:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'D':
			debug = true;
			break;
		case 'S':
			/* --stats: also track redirect successes */
			errors_only = false;
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'h':
		default:
			usage(argv);
			return EXIT_FAILURE;
		}
	}

	/* Lift memlock rlimit, needed for BPF map creation */
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return EXIT_FAILURE;
	}

	if (load_bpf_file(bpf_obj_file)) {
		printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
		return EXIT_FAILURE;
	}
	if (!prog_fd[0]) {
		printf("ERROR - load_bpf_file: %s\n", strerror(errno));
		return EXIT_FAILURE;
	}

	if (debug) {
		print_bpf_prog_info();
	}

	/* Unload/stop tracepoint event by closing fd's */
	if (errors_only) {
		/* The prog_fd[i] and event_fd[i] depend on the
		 * order the functions was defined in _kern.c
		 */
		close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
		close(prog_fd[2]);  /* func: trace_xdp_redirect */
		close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
		close(prog_fd[3]);  /* func: trace_xdp_redirect_map */
	}

	/* Never returns under normal operation (infinite poll loop) */
	stats_poll(interval, errors_only);

	return ret;
}