v5.14.15 (kernel/bpf/bpf_iter.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2020 Facebook */
  3
  4#include <linux/fs.h>
  5#include <linux/anon_inodes.h>
  6#include <linux/filter.h>
  7#include <linux/bpf.h>
  8
  9struct bpf_iter_target_info {
 10	struct list_head list;
 11	const struct bpf_iter_reg *reg_info;
 12	u32 btf_id;	/* cached value */
 13};
 14
 15struct bpf_iter_link {
 16	struct bpf_link link;
 17	struct bpf_iter_aux_info aux;
 18	struct bpf_iter_target_info *tinfo;
 19};
 20
 21struct bpf_iter_priv_data {
 22	struct bpf_iter_target_info *tinfo;
 23	const struct bpf_iter_seq_info *seq_info;
 24	struct bpf_prog *prog;
 25	u64 session_id;
 26	u64 seq_num;
 27	bool done_stop;
 28	u8 target_private[] __aligned(8);
 29};
 30
 31static struct list_head targets = LIST_HEAD_INIT(targets);
 32static DEFINE_MUTEX(targets_mutex);
 33
 34/* protect bpf_iter_link changes */
 35static DEFINE_MUTEX(link_mutex);
 36
 37/* incremented on every opened seq_file */
 38static atomic64_t session_id;
 39
 40static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
 41			    const struct bpf_iter_seq_info *seq_info);
 42
 43static void bpf_iter_inc_seq_num(struct seq_file *seq)
 44{
 45	struct bpf_iter_priv_data *iter_priv;
 46
 47	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 48				 target_private);
 49	iter_priv->seq_num++;
 50}
 51
 52static void bpf_iter_dec_seq_num(struct seq_file *seq)
 53{
 54	struct bpf_iter_priv_data *iter_priv;
 55
 56	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 57				 target_private);
 58	iter_priv->seq_num--;
 59}
 60
 61static void bpf_iter_done_stop(struct seq_file *seq)
 62{
 63	struct bpf_iter_priv_data *iter_priv;
 64
 65	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 66				 target_private);
 67	iter_priv->done_stop = true;
 68}
 69
 70static bool bpf_iter_support_resched(struct seq_file *seq)
 71{
 72	struct bpf_iter_priv_data *iter_priv;
 73
 74	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 75				 target_private);
 76	return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
 77}
 78
 79/* maximum visited objects before bailing out */
 80#define MAX_ITER_OBJECTS	1000000
 81
 82/* bpf_seq_read, a customized and simpler version for bpf iterator.
 83 * no_llseek is assumed for this file.
 84 * The following are differences from seq_read():
 85 *  . fixed buffer size (PAGE_SIZE)
 86 *  . assuming no_llseek
 87 *  . stop() may call bpf program, handling potential overflow there
 88 */
 89static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
 90			    loff_t *ppos)
 91{
 92	struct seq_file *seq = file->private_data;
 93	size_t n, offs, copied = 0;
 94	int err = 0, num_objs = 0;
 95	bool can_resched;
 96	void *p;
 97
 98	mutex_lock(&seq->lock);
 99
100	if (!seq->buf) {
101		seq->size = PAGE_SIZE << 3;
102		seq->buf = kvmalloc(seq->size, GFP_KERNEL);
103		if (!seq->buf) {
104			err = -ENOMEM;
105			goto done;
106		}
107	}
108
109	if (seq->count) {
110		n = min(seq->count, size);
111		err = copy_to_user(buf, seq->buf + seq->from, n);
112		if (err) {
113			err = -EFAULT;
114			goto done;
115		}
116		seq->count -= n;
117		seq->from += n;
118		copied = n;
119		goto done;
120	}
121
122	seq->from = 0;
123	p = seq->op->start(seq, &seq->index);
124	if (!p)
125		goto stop;
126	if (IS_ERR(p)) {
127		err = PTR_ERR(p);
128		seq->op->stop(seq, p);
129		seq->count = 0;
130		goto done;
131	}
132
133	err = seq->op->show(seq, p);
134	if (err > 0) {
135		/* object is skipped, decrease seq_num, so next
136		 * valid object can reuse the same seq_num.
137		 */
138		bpf_iter_dec_seq_num(seq);
139		seq->count = 0;
140	} else if (err < 0 || seq_has_overflowed(seq)) {
141		if (!err)
142			err = -E2BIG;
143		seq->op->stop(seq, p);
144		seq->count = 0;
145		goto done;
146	}
147
148	can_resched = bpf_iter_support_resched(seq);
149	while (1) {
150		loff_t pos = seq->index;
151
152		num_objs++;
153		offs = seq->count;
154		p = seq->op->next(seq, p, &seq->index);
155		if (pos == seq->index) {
156			pr_info_ratelimited("buggy seq_file .next function %ps "
 157				"did not update position index\n",
158				seq->op->next);
159			seq->index++;
160		}
161
162		if (IS_ERR_OR_NULL(p))
163			break;
164
165		/* got a valid next object, increase seq_num */
166		bpf_iter_inc_seq_num(seq);
167
168		if (seq->count >= size)
169			break;
170
171		if (num_objs >= MAX_ITER_OBJECTS) {
172			if (offs == 0) {
173				err = -EAGAIN;
174				seq->op->stop(seq, p);
175				goto done;
176			}
177			break;
178		}
179
180		err = seq->op->show(seq, p);
181		if (err > 0) {
182			bpf_iter_dec_seq_num(seq);
183			seq->count = offs;
184		} else if (err < 0 || seq_has_overflowed(seq)) {
185			seq->count = offs;
186			if (offs == 0) {
187				if (!err)
188					err = -E2BIG;
189				seq->op->stop(seq, p);
190				goto done;
191			}
192			break;
193		}
194
195		if (can_resched)
196			cond_resched();
197	}
198stop:
199	offs = seq->count;
200	/* bpf program called if !p */
201	seq->op->stop(seq, p);
202	if (!p) {
203		if (!seq_has_overflowed(seq)) {
204			bpf_iter_done_stop(seq);
205		} else {
206			seq->count = offs;
207			if (offs == 0) {
208				err = -E2BIG;
209				goto done;
210			}
211		}
212	}
213
214	n = min(seq->count, size);
215	err = copy_to_user(buf, seq->buf, n);
216	if (err) {
217		err = -EFAULT;
218		goto done;
219	}
220	copied = n;
221	seq->count -= n;
222	seq->from = n;
223done:
224	if (!copied)
225		copied = err;
226	else
227		*ppos += copied;
228	mutex_unlock(&seq->lock);
229	return copied;
230}
231
232static const struct bpf_iter_seq_info *
233__get_seq_info(struct bpf_iter_link *link)
234{
235	const struct bpf_iter_seq_info *seq_info;
236
237	if (link->aux.map) {
238		seq_info = link->aux.map->ops->iter_seq_info;
239		if (seq_info)
240			return seq_info;
241	}
242
243	return link->tinfo->reg_info->seq_info;
244}
245
246static int iter_open(struct inode *inode, struct file *file)
247{
248	struct bpf_iter_link *link = inode->i_private;
249
250	return prepare_seq_file(file, link, __get_seq_info(link));
251}
252
253static int iter_release(struct inode *inode, struct file *file)
254{
255	struct bpf_iter_priv_data *iter_priv;
256	struct seq_file *seq;
257
258	seq = file->private_data;
259	if (!seq)
260		return 0;
261
262	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
263				 target_private);
264
265	if (iter_priv->seq_info->fini_seq_private)
266		iter_priv->seq_info->fini_seq_private(seq->private);
267
268	bpf_prog_put(iter_priv->prog);
269	seq->private = iter_priv;
270
271	return seq_release_private(inode, file);
272}
273
274const struct file_operations bpf_iter_fops = {
275	.open		= iter_open,
276	.llseek		= no_llseek,
277	.read		= bpf_seq_read,
278	.release	= iter_release,
279};
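/* Editorial sketch (not part of bpf_iter.c), userspace side: the fops above
 * also back iterator links pinned in bpffs (the pinned inode's i_private is
 * the link, which is exactly what iter_open() reads), so a pinned link can
 * be read like a regular file.  Program, path and error handling here are
 * hypothetical; only bpf_program__attach_iter(), bpf_link__pin() and
 * bpf_link__destroy() are real libbpf calls.
 */
#include <bpf/libbpf.h>

static int pin_task_iter(struct bpf_program *prog)
{
	struct bpf_link *link;
	int err;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;

	/* afterwards e.g. "cat /sys/fs/bpf/my_task_iter" goes through iter_open() */
	err = bpf_link__pin(link, "/sys/fs/bpf/my_task_iter");
	bpf_link__destroy(link);	/* the pinned path keeps the link alive */
	return err;
}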
280
281/* The argument reg_info will be cached in bpf_iter_target_info.
282 * The common practice is to declare target reg_info as
283 * a const static variable and passed as an argument to
284 * bpf_iter_reg_target().
285 */
286int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
287{
288	struct bpf_iter_target_info *tinfo;
289
290	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
291	if (!tinfo)
292		return -ENOMEM;
293
294	tinfo->reg_info = reg_info;
295	INIT_LIST_HEAD(&tinfo->list);
296
297	mutex_lock(&targets_mutex);
298	list_add(&tinfo->list, &targets);
299	mutex_unlock(&targets_mutex);
300
301	return 0;
302}
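/* Editorial sketch (not part of bpf_iter.c): one plausible shape of a target
 * following the registration pattern described in the comment above.  All
 * "foo" names are hypothetical; real targets (task, bpf_map, tcp, ...) supply
 * their own seq_operations, and their show() builds a target-specific context
 * and calls bpf_iter_get_info()/bpf_iter_run_prog().
 */
static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	return NULL;		/* empty iteration, for brevity */
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
	return 0;		/* a real target would run the attached prog here */
}

static const struct seq_operations foo_seq_ops = {
	.start	= foo_seq_start,
	.next	= foo_seq_next,
	.stop	= foo_seq_stop,
	.show	= foo_seq_show,
};

static const struct bpf_iter_seq_info foo_seq_info = {
	.seq_ops	= &foo_seq_ops,
	.seq_priv_size	= 0,
};

static const struct bpf_iter_reg foo_reg_info = {
	.target		= "foo",
	.feature	= BPF_ITER_RESCHED,
	.seq_info	= &foo_seq_info,
};

static int __init foo_iter_init(void)
{
	return bpf_iter_reg_target(&foo_reg_info);
}
late_initcall(foo_iter_init);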
303
304void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
305{
306	struct bpf_iter_target_info *tinfo;
307	bool found = false;
308
309	mutex_lock(&targets_mutex);
310	list_for_each_entry(tinfo, &targets, list) {
311		if (reg_info == tinfo->reg_info) {
312			list_del(&tinfo->list);
313			kfree(tinfo);
314			found = true;
315			break;
316		}
317	}
318	mutex_unlock(&targets_mutex);
319
320	WARN_ON(found == false);
321}
322
323static void cache_btf_id(struct bpf_iter_target_info *tinfo,
324			 struct bpf_prog *prog)
325{
326	tinfo->btf_id = prog->aux->attach_btf_id;
327}
328
329bool bpf_iter_prog_supported(struct bpf_prog *prog)
330{
331	const char *attach_fname = prog->aux->attach_func_name;
332	u32 prog_btf_id = prog->aux->attach_btf_id;
333	const char *prefix = BPF_ITER_FUNC_PREFIX;
334	struct bpf_iter_target_info *tinfo;
335	int prefix_len = strlen(prefix);
336	bool supported = false;
337
338	if (strncmp(attach_fname, prefix, prefix_len))
339		return false;
340
341	mutex_lock(&targets_mutex);
342	list_for_each_entry(tinfo, &targets, list) {
343		if (tinfo->btf_id && tinfo->btf_id == prog_btf_id) {
344			supported = true;
345			break;
346		}
347		if (!strcmp(attach_fname + prefix_len, tinfo->reg_info->target)) {
348			cache_btf_id(tinfo, prog);
349			supported = true;
350			break;
351		}
352	}
353	mutex_unlock(&targets_mutex);
354
355	if (supported) {
356		prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
357		prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
358	}
359
360	return supported;
361}
362
363static void bpf_iter_link_release(struct bpf_link *link)
364{
365	struct bpf_iter_link *iter_link =
366		container_of(link, struct bpf_iter_link, link);
367
368	if (iter_link->tinfo->reg_info->detach_target)
369		iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
370}
371
372static void bpf_iter_link_dealloc(struct bpf_link *link)
373{
374	struct bpf_iter_link *iter_link =
375		container_of(link, struct bpf_iter_link, link);
376
377	kfree(iter_link);
378}
379
380static int bpf_iter_link_replace(struct bpf_link *link,
381				 struct bpf_prog *new_prog,
382				 struct bpf_prog *old_prog)
383{
384	int ret = 0;
385
386	mutex_lock(&link_mutex);
387	if (old_prog && link->prog != old_prog) {
388		ret = -EPERM;
389		goto out_unlock;
390	}
391
392	if (link->prog->type != new_prog->type ||
393	    link->prog->expected_attach_type != new_prog->expected_attach_type ||
394	    link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
395		ret = -EINVAL;
396		goto out_unlock;
397	}
398
399	old_prog = xchg(&link->prog, new_prog);
400	bpf_prog_put(old_prog);
401
402out_unlock:
403	mutex_unlock(&link_mutex);
404	return ret;
405}
406
407static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
408				      struct seq_file *seq)
409{
410	struct bpf_iter_link *iter_link =
411		container_of(link, struct bpf_iter_link, link);
412	bpf_iter_show_fdinfo_t show_fdinfo;
413
414	seq_printf(seq,
415		   "target_name:\t%s\n",
416		   iter_link->tinfo->reg_info->target);
417
418	show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
419	if (show_fdinfo)
420		show_fdinfo(&iter_link->aux, seq);
421}
422
423static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
424					struct bpf_link_info *info)
425{
426	struct bpf_iter_link *iter_link =
427		container_of(link, struct bpf_iter_link, link);
428	char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
429	bpf_iter_fill_link_info_t fill_link_info;
430	u32 ulen = info->iter.target_name_len;
431	const char *target_name;
432	u32 target_len;
433
434	if (!ulen ^ !ubuf)
435		return -EINVAL;
436
437	target_name = iter_link->tinfo->reg_info->target;
438	target_len =  strlen(target_name);
439	info->iter.target_name_len = target_len + 1;
440
441	if (ubuf) {
442		if (ulen >= target_len + 1) {
443			if (copy_to_user(ubuf, target_name, target_len + 1))
444				return -EFAULT;
445		} else {
446			char zero = '\0';
447
448			if (copy_to_user(ubuf, target_name, ulen - 1))
449				return -EFAULT;
450			if (put_user(zero, ubuf + ulen - 1))
451				return -EFAULT;
452			return -ENOSPC;
453		}
454	}
455
456	fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
457	if (fill_link_info)
458		return fill_link_info(&iter_link->aux, info);
459
460	return 0;
461}
462
463static const struct bpf_link_ops bpf_iter_link_lops = {
464	.release = bpf_iter_link_release,
465	.dealloc = bpf_iter_link_dealloc,
466	.update_prog = bpf_iter_link_replace,
467	.show_fdinfo = bpf_iter_link_show_fdinfo,
468	.fill_link_info = bpf_iter_link_fill_link_info,
469};
470
471bool bpf_link_is_iter(struct bpf_link *link)
472{
473	return link->ops == &bpf_iter_link_lops;
474}
475
476int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
477			 struct bpf_prog *prog)
478{
479	struct bpf_link_primer link_primer;
480	struct bpf_iter_target_info *tinfo;
481	union bpf_iter_link_info linfo;
482	struct bpf_iter_link *link;
483	u32 prog_btf_id, linfo_len;
484	bool existed = false;
485	bpfptr_t ulinfo;
486	int err;
487
488	if (attr->link_create.target_fd || attr->link_create.flags)
489		return -EINVAL;
490
491	memset(&linfo, 0, sizeof(union bpf_iter_link_info));
492
493	ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
494	linfo_len = attr->link_create.iter_info_len;
495	if (bpfptr_is_null(ulinfo) ^ !linfo_len)
496		return -EINVAL;
497
498	if (!bpfptr_is_null(ulinfo)) {
499		err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
500					       linfo_len);
501		if (err)
502			return err;
503		linfo_len = min_t(u32, linfo_len, sizeof(linfo));
504		if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
505			return -EFAULT;
506	}
507
508	prog_btf_id = prog->aux->attach_btf_id;
509	mutex_lock(&targets_mutex);
510	list_for_each_entry(tinfo, &targets, list) {
511		if (tinfo->btf_id == prog_btf_id) {
512			existed = true;
513			break;
514		}
515	}
516	mutex_unlock(&targets_mutex);
517	if (!existed)
518		return -ENOENT;
519
520	link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
521	if (!link)
522		return -ENOMEM;
523
524	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
525	link->tinfo = tinfo;
526
527	err  = bpf_link_prime(&link->link, &link_primer);
528	if (err) {
529		kfree(link);
530		return err;
531	}
532
533	if (tinfo->reg_info->attach_target) {
534		err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
535		if (err) {
536			bpf_link_cleanup(&link_primer);
537			return err;
538		}
539	}
540
541	return bpf_link_settle(&link_primer);
542}
543
544static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
545			  struct bpf_iter_target_info *tinfo,
546			  const struct bpf_iter_seq_info *seq_info,
547			  struct bpf_prog *prog)
548{
549	priv_data->tinfo = tinfo;
550	priv_data->seq_info = seq_info;
551	priv_data->prog = prog;
552	priv_data->session_id = atomic64_inc_return(&session_id);
553	priv_data->seq_num = 0;
554	priv_data->done_stop = false;
555}
556
557static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
558			    const struct bpf_iter_seq_info *seq_info)
559{
560	struct bpf_iter_priv_data *priv_data;
561	struct bpf_iter_target_info *tinfo;
562	struct bpf_prog *prog;
563	u32 total_priv_dsize;
564	struct seq_file *seq;
565	int err = 0;
566
567	mutex_lock(&link_mutex);
568	prog = link->link.prog;
569	bpf_prog_inc(prog);
570	mutex_unlock(&link_mutex);
571
572	tinfo = link->tinfo;
573	total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
574			   seq_info->seq_priv_size;
575	priv_data = __seq_open_private(file, seq_info->seq_ops,
576				       total_priv_dsize);
577	if (!priv_data) {
578		err = -ENOMEM;
579		goto release_prog;
580	}
581
582	if (seq_info->init_seq_private) {
583		err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
584		if (err)
585			goto release_seq_file;
586	}
587
588	init_seq_meta(priv_data, tinfo, seq_info, prog);
589	seq = file->private_data;
590	seq->private = priv_data->target_private;
591
592	return 0;
593
594release_seq_file:
595	seq_release_private(file->f_inode, file);
596	file->private_data = NULL;
597release_prog:
598	bpf_prog_put(prog);
599	return err;
600}
601
602int bpf_iter_new_fd(struct bpf_link *link)
603{
604	struct bpf_iter_link *iter_link;
605	struct file *file;
606	unsigned int flags;
607	int err, fd;
608
609	if (link->ops != &bpf_iter_link_lops)
610		return -EINVAL;
611
612	flags = O_RDONLY | O_CLOEXEC;
613	fd = get_unused_fd_flags(flags);
614	if (fd < 0)
615		return fd;
616
617	file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags);
618	if (IS_ERR(file)) {
619		err = PTR_ERR(file);
620		goto free_fd;
621	}
622
623	iter_link = container_of(link, struct bpf_iter_link, link);
624	err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link));
625	if (err)
626		goto free_file;
627
628	fd_install(fd, file);
629	return fd;
630
631free_file:
632	fput(file);
633free_fd:
634	put_unused_fd(fd);
635	return err;
636}
637
638struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
639{
640	struct bpf_iter_priv_data *iter_priv;
641	struct seq_file *seq;
642	void *seq_priv;
643
644	seq = meta->seq;
645	if (seq->file->f_op != &bpf_iter_fops)
646		return NULL;
647
648	seq_priv = seq->private;
649	iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
650				 target_private);
651
652	if (in_stop && iter_priv->done_stop)
653		return NULL;
654
655	meta->session_id = iter_priv->session_id;
656	meta->seq_num = iter_priv->seq_num;
657
658	return iter_priv->prog;
659}
660
661int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
662{
663	int ret;
664
665	rcu_read_lock();
666	migrate_disable();
667	ret = BPF_PROG_RUN(prog, ctx);
668	migrate_enable();
669	rcu_read_unlock();
670
671	/* bpf program can only return 0 or 1:
672	 *  0 : okay
673	 *  1 : retry the same object
674	 * The bpf_iter_run_prog() return value
675	 * will be seq_ops->show() return value.
676	 */
677	return ret == 0 ? 0 : -EAGAIN;
678}
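/* Editorial sketch (not part of this file), BPF program side built as a
 * separate BPF object: a minimal "iter/task" program illustrating the 0/1
 * return contract enforced by bpf_iter_run_prog() above.  The includes and
 * context layout follow the usual bpf_iter selftests; treat them as
 * illustrative assumptions.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;

	if (!task)		/* the final stop() call passes a NULL object */
		return 0;

	BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
	return 0;		/* 0: done with this object, 1: retry it (-EAGAIN) */
}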
679
680BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
681	   void *, callback_ctx, u64, flags)
682{
683	return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
684}
685
686const struct bpf_func_proto bpf_for_each_map_elem_proto = {
687	.func		= bpf_for_each_map_elem,
688	.gpl_only	= false,
689	.ret_type	= RET_INTEGER,
690	.arg1_type	= ARG_CONST_MAP_PTR,
691	.arg2_type	= ARG_PTR_TO_FUNC,
692	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
693	.arg4_type	= ARG_ANYTHING,
694};
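/* Editorial sketch (not part of this file), BPF program side: driving the
 * bpf_for_each_map_elem() helper defined above.  The map layout, section name
 * and callback are hypothetical; the callback returns 0 to continue and 1 to
 * stop early, per the helper contract.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

static long sum_elem(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	*(__u64 *)ctx += *val;
	return 0;		/* keep iterating; return 1 to stop early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_counters(void *ctx)
{
	__u64 total = 0;

	bpf_for_each_map_elem(&counters, sum_elem, &total, 0);
	return 0;
}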
v6.13.7 (kernel/bpf/bpf_iter.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2020 Facebook */
  3
  4#include <linux/fs.h>
  5#include <linux/anon_inodes.h>
  6#include <linux/filter.h>
  7#include <linux/bpf.h>
  8#include <linux/rcupdate_trace.h>
  9
 10struct bpf_iter_target_info {
 11	struct list_head list;
 12	const struct bpf_iter_reg *reg_info;
 13	u32 btf_id;	/* cached value */
 14};
 15
 16struct bpf_iter_link {
 17	struct bpf_link link;
 18	struct bpf_iter_aux_info aux;
 19	struct bpf_iter_target_info *tinfo;
 20};
 21
 22struct bpf_iter_priv_data {
 23	struct bpf_iter_target_info *tinfo;
 24	const struct bpf_iter_seq_info *seq_info;
 25	struct bpf_prog *prog;
 26	u64 session_id;
 27	u64 seq_num;
 28	bool done_stop;
 29	u8 target_private[] __aligned(8);
 30};
 31
 32static struct list_head targets = LIST_HEAD_INIT(targets);
 33static DEFINE_MUTEX(targets_mutex);
 34
 35/* protect bpf_iter_link changes */
 36static DEFINE_MUTEX(link_mutex);
 37
 38/* incremented on every opened seq_file */
 39static atomic64_t session_id;
 40
 41static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
 42			    const struct bpf_iter_seq_info *seq_info);
 43
 44static void bpf_iter_inc_seq_num(struct seq_file *seq)
 45{
 46	struct bpf_iter_priv_data *iter_priv;
 47
 48	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 49				 target_private);
 50	iter_priv->seq_num++;
 51}
 52
 53static void bpf_iter_dec_seq_num(struct seq_file *seq)
 54{
 55	struct bpf_iter_priv_data *iter_priv;
 56
 57	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 58				 target_private);
 59	iter_priv->seq_num--;
 60}
 61
 62static void bpf_iter_done_stop(struct seq_file *seq)
 63{
 64	struct bpf_iter_priv_data *iter_priv;
 65
 66	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 67				 target_private);
 68	iter_priv->done_stop = true;
 69}
 70
 71static inline bool bpf_iter_target_support_resched(const struct bpf_iter_target_info *tinfo)
 72{
 73	return tinfo->reg_info->feature & BPF_ITER_RESCHED;
 74}
 75
 76static bool bpf_iter_support_resched(struct seq_file *seq)
 77{
 78	struct bpf_iter_priv_data *iter_priv;
 79
 80	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
 81				 target_private);
 82	return bpf_iter_target_support_resched(iter_priv->tinfo);
 83}
 84
 85/* maximum visited objects before bailing out */
 86#define MAX_ITER_OBJECTS	1000000
 87
 88/* bpf_seq_read, a customized and simpler version for bpf iterator.
 89 * The following are differences from seq_read():
 90 *  . fixed buffer size (PAGE_SIZE)
 91 *  . assuming NULL ->llseek()
 92 *  . stop() may call bpf program, handling potential overflow there
 93 */
 94static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
 95			    loff_t *ppos)
 96{
 97	struct seq_file *seq = file->private_data;
 98	size_t n, offs, copied = 0;
 99	int err = 0, num_objs = 0;
100	bool can_resched;
101	void *p;
102
103	mutex_lock(&seq->lock);
104
105	if (!seq->buf) {
106		seq->size = PAGE_SIZE << 3;
107		seq->buf = kvmalloc(seq->size, GFP_KERNEL);
108		if (!seq->buf) {
109			err = -ENOMEM;
110			goto done;
111		}
112	}
113
114	if (seq->count) {
115		n = min(seq->count, size);
116		err = copy_to_user(buf, seq->buf + seq->from, n);
117		if (err) {
118			err = -EFAULT;
119			goto done;
120		}
121		seq->count -= n;
122		seq->from += n;
123		copied = n;
124		goto done;
125	}
126
127	seq->from = 0;
128	p = seq->op->start(seq, &seq->index);
129	if (!p)
130		goto stop;
131	if (IS_ERR(p)) {
132		err = PTR_ERR(p);
133		seq->op->stop(seq, p);
134		seq->count = 0;
135		goto done;
136	}
137
138	err = seq->op->show(seq, p);
139	if (err > 0) {
140		/* object is skipped, decrease seq_num, so next
141		 * valid object can reuse the same seq_num.
142		 */
143		bpf_iter_dec_seq_num(seq);
144		seq->count = 0;
145	} else if (err < 0 || seq_has_overflowed(seq)) {
146		if (!err)
147			err = -E2BIG;
148		seq->op->stop(seq, p);
149		seq->count = 0;
150		goto done;
151	}
152
153	can_resched = bpf_iter_support_resched(seq);
154	while (1) {
155		loff_t pos = seq->index;
156
157		num_objs++;
158		offs = seq->count;
159		p = seq->op->next(seq, p, &seq->index);
160		if (pos == seq->index) {
161			pr_info_ratelimited("buggy seq_file .next function %ps "
 162				"did not update position index\n",
163				seq->op->next);
164			seq->index++;
165		}
166
167		if (IS_ERR_OR_NULL(p))
168			break;
169
170		/* got a valid next object, increase seq_num */
171		bpf_iter_inc_seq_num(seq);
172
173		if (seq->count >= size)
174			break;
175
176		if (num_objs >= MAX_ITER_OBJECTS) {
177			if (offs == 0) {
178				err = -EAGAIN;
179				seq->op->stop(seq, p);
180				goto done;
181			}
182			break;
183		}
184
185		err = seq->op->show(seq, p);
186		if (err > 0) {
187			bpf_iter_dec_seq_num(seq);
188			seq->count = offs;
189		} else if (err < 0 || seq_has_overflowed(seq)) {
190			seq->count = offs;
191			if (offs == 0) {
192				if (!err)
193					err = -E2BIG;
194				seq->op->stop(seq, p);
195				goto done;
196			}
197			break;
198		}
199
200		if (can_resched)
201			cond_resched();
202	}
203stop:
204	offs = seq->count;
205	if (IS_ERR(p)) {
206		seq->op->stop(seq, NULL);
207		err = PTR_ERR(p);
208		goto done;
209	}
210	/* bpf program called if !p */
211	seq->op->stop(seq, p);
212	if (!p) {
213		if (!seq_has_overflowed(seq)) {
214			bpf_iter_done_stop(seq);
215		} else {
216			seq->count = offs;
217			if (offs == 0) {
218				err = -E2BIG;
219				goto done;
220			}
221		}
222	}
223
224	n = min(seq->count, size);
225	err = copy_to_user(buf, seq->buf, n);
226	if (err) {
227		err = -EFAULT;
228		goto done;
229	}
230	copied = n;
231	seq->count -= n;
232	seq->from = n;
233done:
234	if (!copied)
235		copied = err;
236	else
237		*ppos += copied;
238	mutex_unlock(&seq->lock);
239	return copied;
240}
241
242static const struct bpf_iter_seq_info *
243__get_seq_info(struct bpf_iter_link *link)
244{
245	const struct bpf_iter_seq_info *seq_info;
246
247	if (link->aux.map) {
248		seq_info = link->aux.map->ops->iter_seq_info;
249		if (seq_info)
250			return seq_info;
251	}
252
253	return link->tinfo->reg_info->seq_info;
254}
255
256static int iter_open(struct inode *inode, struct file *file)
257{
258	struct bpf_iter_link *link = inode->i_private;
259
260	return prepare_seq_file(file, link, __get_seq_info(link));
261}
262
263static int iter_release(struct inode *inode, struct file *file)
264{
265	struct bpf_iter_priv_data *iter_priv;
266	struct seq_file *seq;
267
268	seq = file->private_data;
269	if (!seq)
270		return 0;
271
272	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
273				 target_private);
274
275	if (iter_priv->seq_info->fini_seq_private)
276		iter_priv->seq_info->fini_seq_private(seq->private);
277
278	bpf_prog_put(iter_priv->prog);
279	seq->private = iter_priv;
280
281	return seq_release_private(inode, file);
282}
283
284const struct file_operations bpf_iter_fops = {
285	.open		= iter_open,
286	.read		= bpf_seq_read,
287	.release	= iter_release,
288};
289
290/* The argument reg_info will be cached in bpf_iter_target_info.
291 * The common practice is to declare target reg_info as
292 * a const static variable and passed as an argument to
293 * bpf_iter_reg_target().
294 */
295int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
296{
297	struct bpf_iter_target_info *tinfo;
298
299	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
300	if (!tinfo)
301		return -ENOMEM;
302
303	tinfo->reg_info = reg_info;
304	INIT_LIST_HEAD(&tinfo->list);
305
306	mutex_lock(&targets_mutex);
307	list_add(&tinfo->list, &targets);
308	mutex_unlock(&targets_mutex);
309
310	return 0;
311}
312
313void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
314{
315	struct bpf_iter_target_info *tinfo;
316	bool found = false;
317
318	mutex_lock(&targets_mutex);
319	list_for_each_entry(tinfo, &targets, list) {
320		if (reg_info == tinfo->reg_info) {
321			list_del(&tinfo->list);
322			kfree(tinfo);
323			found = true;
324			break;
325		}
326	}
327	mutex_unlock(&targets_mutex);
328
329	WARN_ON(found == false);
330}
331
332static void cache_btf_id(struct bpf_iter_target_info *tinfo,
333			 struct bpf_prog *prog)
334{
335	tinfo->btf_id = prog->aux->attach_btf_id;
336}
337
338bool bpf_iter_prog_supported(struct bpf_prog *prog)
339{
340	const char *attach_fname = prog->aux->attach_func_name;
341	struct bpf_iter_target_info *tinfo = NULL, *iter;
342	u32 prog_btf_id = prog->aux->attach_btf_id;
343	const char *prefix = BPF_ITER_FUNC_PREFIX;
344	int prefix_len = strlen(prefix);
345
346	if (strncmp(attach_fname, prefix, prefix_len))
347		return false;
348
349	mutex_lock(&targets_mutex);
350	list_for_each_entry(iter, &targets, list) {
351		if (iter->btf_id && iter->btf_id == prog_btf_id) {
352			tinfo = iter;
353			break;
354		}
355		if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
356			cache_btf_id(iter, prog);
357			tinfo = iter;
358			break;
359		}
360	}
361	mutex_unlock(&targets_mutex);
362
363	if (tinfo) {
364		prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
365		prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
366	}
367
368	return tinfo != NULL;
369}
370
371const struct bpf_func_proto *
372bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
373{
374	const struct bpf_iter_target_info *tinfo;
375	const struct bpf_func_proto *fn = NULL;
376
377	mutex_lock(&targets_mutex);
378	list_for_each_entry(tinfo, &targets, list) {
379		if (tinfo->btf_id == prog->aux->attach_btf_id) {
380			const struct bpf_iter_reg *reg_info;
381
382			reg_info = tinfo->reg_info;
383			if (reg_info->get_func_proto)
384				fn = reg_info->get_func_proto(func_id, prog);
385			break;
386		}
387	}
388	mutex_unlock(&targets_mutex);
389
390	return fn;
391}
392
393static void bpf_iter_link_release(struct bpf_link *link)
394{
395	struct bpf_iter_link *iter_link =
396		container_of(link, struct bpf_iter_link, link);
397
398	if (iter_link->tinfo->reg_info->detach_target)
399		iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
400}
401
402static void bpf_iter_link_dealloc(struct bpf_link *link)
403{
404	struct bpf_iter_link *iter_link =
405		container_of(link, struct bpf_iter_link, link);
406
407	kfree(iter_link);
408}
409
410static int bpf_iter_link_replace(struct bpf_link *link,
411				 struct bpf_prog *new_prog,
412				 struct bpf_prog *old_prog)
413{
414	int ret = 0;
415
416	mutex_lock(&link_mutex);
417	if (old_prog && link->prog != old_prog) {
418		ret = -EPERM;
419		goto out_unlock;
420	}
421
422	if (link->prog->type != new_prog->type ||
423	    link->prog->expected_attach_type != new_prog->expected_attach_type ||
424	    link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
425		ret = -EINVAL;
426		goto out_unlock;
427	}
428
429	old_prog = xchg(&link->prog, new_prog);
430	bpf_prog_put(old_prog);
431
432out_unlock:
433	mutex_unlock(&link_mutex);
434	return ret;
435}
436
437static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
438				      struct seq_file *seq)
439{
440	struct bpf_iter_link *iter_link =
441		container_of(link, struct bpf_iter_link, link);
442	bpf_iter_show_fdinfo_t show_fdinfo;
443
444	seq_printf(seq,
445		   "target_name:\t%s\n",
446		   iter_link->tinfo->reg_info->target);
447
448	show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
449	if (show_fdinfo)
450		show_fdinfo(&iter_link->aux, seq);
451}
452
453static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
454					struct bpf_link_info *info)
455{
456	struct bpf_iter_link *iter_link =
457		container_of(link, struct bpf_iter_link, link);
458	char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
459	bpf_iter_fill_link_info_t fill_link_info;
460	u32 ulen = info->iter.target_name_len;
461	const char *target_name;
462	u32 target_len;
463
464	if (!ulen ^ !ubuf)
465		return -EINVAL;
466
467	target_name = iter_link->tinfo->reg_info->target;
468	target_len =  strlen(target_name);
469	info->iter.target_name_len = target_len + 1;
470
471	if (ubuf) {
472		if (ulen >= target_len + 1) {
473			if (copy_to_user(ubuf, target_name, target_len + 1))
474				return -EFAULT;
475		} else {
476			char zero = '\0';
477
478			if (copy_to_user(ubuf, target_name, ulen - 1))
479				return -EFAULT;
480			if (put_user(zero, ubuf + ulen - 1))
481				return -EFAULT;
482			return -ENOSPC;
483		}
484	}
485
486	fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
487	if (fill_link_info)
488		return fill_link_info(&iter_link->aux, info);
489
490	return 0;
491}
492
493static const struct bpf_link_ops bpf_iter_link_lops = {
494	.release = bpf_iter_link_release,
495	.dealloc = bpf_iter_link_dealloc,
496	.update_prog = bpf_iter_link_replace,
497	.show_fdinfo = bpf_iter_link_show_fdinfo,
498	.fill_link_info = bpf_iter_link_fill_link_info,
499};
500
501bool bpf_link_is_iter(struct bpf_link *link)
502{
503	return link->ops == &bpf_iter_link_lops;
504}
505
506int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
507			 struct bpf_prog *prog)
508{
509	struct bpf_iter_target_info *tinfo = NULL, *iter;
510	struct bpf_link_primer link_primer;
511	union bpf_iter_link_info linfo;
512	struct bpf_iter_link *link;
513	u32 prog_btf_id, linfo_len;
514	bpfptr_t ulinfo;
515	int err;
516
517	if (attr->link_create.target_fd || attr->link_create.flags)
518		return -EINVAL;
519
520	memset(&linfo, 0, sizeof(union bpf_iter_link_info));
521
522	ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
523	linfo_len = attr->link_create.iter_info_len;
524	if (bpfptr_is_null(ulinfo) ^ !linfo_len)
525		return -EINVAL;
526
527	if (!bpfptr_is_null(ulinfo)) {
528		err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
529					       linfo_len);
530		if (err)
531			return err;
532		linfo_len = min_t(u32, linfo_len, sizeof(linfo));
533		if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
534			return -EFAULT;
535	}
536
537	prog_btf_id = prog->aux->attach_btf_id;
538	mutex_lock(&targets_mutex);
539	list_for_each_entry(iter, &targets, list) {
540		if (iter->btf_id == prog_btf_id) {
541			tinfo = iter;
542			break;
543		}
544	}
545	mutex_unlock(&targets_mutex);
546	if (!tinfo)
547		return -ENOENT;
548
549	/* Only allow sleepable program for resched-able iterator */
550	if (prog->sleepable && !bpf_iter_target_support_resched(tinfo))
551		return -EINVAL;
552
553	link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
554	if (!link)
555		return -ENOMEM;
556
557	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
558	link->tinfo = tinfo;
559
560	err = bpf_link_prime(&link->link, &link_primer);
561	if (err) {
562		kfree(link);
563		return err;
564	}
565
566	if (tinfo->reg_info->attach_target) {
567		err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
568		if (err) {
569			bpf_link_cleanup(&link_primer);
570			return err;
571		}
572	}
573
574	return bpf_link_settle(&link_primer);
575}
576
577static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
578			  struct bpf_iter_target_info *tinfo,
579			  const struct bpf_iter_seq_info *seq_info,
580			  struct bpf_prog *prog)
581{
582	priv_data->tinfo = tinfo;
583	priv_data->seq_info = seq_info;
584	priv_data->prog = prog;
585	priv_data->session_id = atomic64_inc_return(&session_id);
586	priv_data->seq_num = 0;
587	priv_data->done_stop = false;
588}
589
590static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
591			    const struct bpf_iter_seq_info *seq_info)
592{
593	struct bpf_iter_priv_data *priv_data;
594	struct bpf_iter_target_info *tinfo;
595	struct bpf_prog *prog;
596	u32 total_priv_dsize;
597	struct seq_file *seq;
598	int err = 0;
599
600	mutex_lock(&link_mutex);
601	prog = link->link.prog;
602	bpf_prog_inc(prog);
603	mutex_unlock(&link_mutex);
604
605	tinfo = link->tinfo;
606	total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
607			   seq_info->seq_priv_size;
608	priv_data = __seq_open_private(file, seq_info->seq_ops,
609				       total_priv_dsize);
610	if (!priv_data) {
611		err = -ENOMEM;
612		goto release_prog;
613	}
614
615	if (seq_info->init_seq_private) {
616		err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
617		if (err)
618			goto release_seq_file;
619	}
620
621	init_seq_meta(priv_data, tinfo, seq_info, prog);
622	seq = file->private_data;
623	seq->private = priv_data->target_private;
624
625	return 0;
626
627release_seq_file:
628	seq_release_private(file->f_inode, file);
629	file->private_data = NULL;
630release_prog:
631	bpf_prog_put(prog);
632	return err;
633}
634
635int bpf_iter_new_fd(struct bpf_link *link)
636{
637	struct bpf_iter_link *iter_link;
638	struct file *file;
639	unsigned int flags;
640	int err, fd;
641
642	if (link->ops != &bpf_iter_link_lops)
643		return -EINVAL;
644
645	flags = O_RDONLY | O_CLOEXEC;
646	fd = get_unused_fd_flags(flags);
647	if (fd < 0)
648		return fd;
649
650	file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags);
651	if (IS_ERR(file)) {
652		err = PTR_ERR(file);
653		goto free_fd;
654	}
655
656	iter_link = container_of(link, struct bpf_iter_link, link);
657	err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link));
658	if (err)
659		goto free_file;
660
661	fd_install(fd, file);
662	return fd;
663
664free_file:
665	fput(file);
666free_fd:
667	put_unused_fd(fd);
668	return err;
669}
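/* Editorial sketch (not part of this file), userspace side: the
 * anonymous-fd path above (bpf_iter_new_fd(), reached via the
 * BPF_ITER_CREATE command / libbpf's bpf_iter_create()), as opposed to the
 * bpffs pin path.  "prog" and the buffer size are hypothetical; the libbpf
 * calls used are real.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int dump_iter_output(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[4096];
	int iter_fd;
	ssize_t n;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd >= 0) {
		/* each read() drives bpf_seq_read() until EOF */
		while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(iter_fd);
	}

	bpf_link__destroy(link);
	return iter_fd < 0 ? iter_fd : 0;
}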
670
671struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
672{
673	struct bpf_iter_priv_data *iter_priv;
674	struct seq_file *seq;
675	void *seq_priv;
676
677	seq = meta->seq;
678	if (seq->file->f_op != &bpf_iter_fops)
679		return NULL;
680
681	seq_priv = seq->private;
682	iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
683				 target_private);
684
685	if (in_stop && iter_priv->done_stop)
686		return NULL;
687
688	meta->session_id = iter_priv->session_id;
689	meta->seq_num = iter_priv->seq_num;
690
691	return iter_priv->prog;
692}
693
694int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
695{
696	struct bpf_run_ctx run_ctx, *old_run_ctx;
697	int ret;
698
699	if (prog->sleepable) {
700		rcu_read_lock_trace();
701		migrate_disable();
702		might_fault();
703		old_run_ctx = bpf_set_run_ctx(&run_ctx);
704		ret = bpf_prog_run(prog, ctx);
705		bpf_reset_run_ctx(old_run_ctx);
706		migrate_enable();
707		rcu_read_unlock_trace();
708	} else {
709		rcu_read_lock();
710		migrate_disable();
711		old_run_ctx = bpf_set_run_ctx(&run_ctx);
712		ret = bpf_prog_run(prog, ctx);
713		bpf_reset_run_ctx(old_run_ctx);
714		migrate_enable();
715		rcu_read_unlock();
716	}
717
718	/* bpf program can only return 0 or 1:
719	 *  0 : okay
720	 *  1 : retry the same object
721	 * The bpf_iter_run_prog() return value
722	 * will be seq_ops->show() return value.
723	 */
724	return ret == 0 ? 0 : -EAGAIN;
725}
726
727BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
728	   void *, callback_ctx, u64, flags)
729{
730	return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
731}
732
733const struct bpf_func_proto bpf_for_each_map_elem_proto = {
734	.func		= bpf_for_each_map_elem,
735	.gpl_only	= false,
736	.ret_type	= RET_INTEGER,
737	.arg1_type	= ARG_CONST_MAP_PTR,
738	.arg2_type	= ARG_PTR_TO_FUNC,
739	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
740	.arg4_type	= ARG_ANYTHING,
741};
742
743BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
744	   u64, flags)
745{
746	bpf_callback_t callback = (bpf_callback_t)callback_fn;
747	u64 ret;
748	u32 i;
749
750	/* Note: these safety checks are also verified when bpf_loop
751	 * is inlined, be careful to modify this code in sync. See
752	 * function verifier.c:inline_bpf_loop.
753	 */
754	if (flags)
755		return -EINVAL;
756	if (nr_loops > BPF_MAX_LOOPS)
757		return -E2BIG;
758
759	for (i = 0; i < nr_loops; i++) {
760		ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0);
761		/* return value: 0 - continue, 1 - stop and return */
762		if (ret)
763			return i + 1;
764	}
765
766	return i;
767}
768
769const struct bpf_func_proto bpf_loop_proto = {
770	.func		= bpf_loop,
771	.gpl_only	= false,
772	.ret_type	= RET_INTEGER,
773	.arg1_type	= ARG_ANYTHING,
774	.arg2_type	= ARG_PTR_TO_FUNC,
775	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
776	.arg4_type	= ARG_ANYTHING,
777};
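/* Editorial sketch (not part of this file), BPF program side: using the
 * bpf_loop() helper defined above.  The section name and accumulator are
 * hypothetical; the callback returns 0 to continue and 1 to stop, and
 * bpf_loop() returns the number of iterations performed.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

static long square_step(__u32 index, void *ctx)
{
	*(__u64 *)ctx += (__u64)index * index;
	return 0;		/* 0: continue, 1: stop and return from bpf_loop() */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_squares(void *ctx)
{
	__u64 sum = 0;

	bpf_loop(100, square_step, &sum, 0);
	return 0;
}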
778
779struct bpf_iter_num_kern {
780	int cur; /* current value, inclusive */
781	int end; /* final value, exclusive */
782} __aligned(8);
783
784__bpf_kfunc_start_defs();
785
786__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
787{
788	struct bpf_iter_num_kern *s = (void *)it;
789
790	BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num));
791	BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num));
792
793	/* start == end is legit, it's an empty range and we'll just get NULL
794	 * on first (and any subsequent) bpf_iter_num_next() call
795	 */
796	if (start > end) {
797		s->cur = s->end = 0;
798		return -EINVAL;
799	}
800
801	/* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */
802	if ((s64)end - (s64)start > BPF_MAX_LOOPS) {
803		s->cur = s->end = 0;
804		return -E2BIG;
805	}
806
807	/* user will call bpf_iter_num_next() first,
808	 * which will set s->cur to exactly start value;
809	 * underflow shouldn't matter
810	 */
811	s->cur = start - 1;
812	s->end = end;
813
814	return 0;
815}
816
817__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num* it)
818{
819	struct bpf_iter_num_kern *s = (void *)it;
820
821	/* check failed initialization or if we are done (same behavior);
822	 * need to be careful about overflow, so convert to s64 for checks,
823	 * e.g., if s->cur == s->end == INT_MAX, we can't just do
824	 * s->cur + 1 >= s->end
825	 */
826	if ((s64)(s->cur + 1) >= s->end) {
827		s->cur = s->end = 0;
828		return NULL;
829	}
830
831	s->cur++;
832
833	return &s->cur;
834}
835
836__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
837{
838	struct bpf_iter_num_kern *s = (void *)it;
839
840	s->cur = s->end = 0;
841}
842
843__bpf_kfunc_end_defs();
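/* Editorial sketch (not part of this file), BPF program side: open-coded use
 * of the numbers-iterator kfuncs defined above.  Programs usually go through
 * the bpf_for() convenience macro from the selftests' bpf_experimental.h;
 * the explicit new/next/destroy sequence below is roughly what that macro
 * expands to.  Section name and the summed range are hypothetical.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_range(void *ctx)
{
	struct bpf_iter_num it;
	__u64 sum = 0;
	int *v;

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it)))
		sum += *v;
	bpf_iter_num_destroy(&it);	/* must be destroyed even on early exit */

	return 0;
}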