v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2017 Facebook
  3 */
  4#include <linux/bpf.h>
  5#include <linux/btf_ids.h>
  6#include <linux/slab.h>
  7#include <linux/vmalloc.h>
  8#include <linux/etherdevice.h>
  9#include <linux/filter.h>
 10#include <linux/rcupdate_trace.h>
 11#include <linux/sched/signal.h>
 12#include <net/bpf_sk_storage.h>
 13#include <net/sock.h>
 14#include <net/tcp.h>
 15#include <net/net_namespace.h>
 16#include <linux/error-injection.h>
 17#include <linux/smp.h>
 18#include <linux/sock_diag.h>
 19
 20#define CREATE_TRACE_POINTS
 21#include <trace/events/bpf_test_run.h>
 22
 23struct bpf_test_timer {
 24	enum { NO_PREEMPT, NO_MIGRATE } mode;
 25	u32 i;
 26	u64 time_start, time_spent;
 27};
 28
 29static void bpf_test_timer_enter(struct bpf_test_timer *t)
 30	__acquires(rcu)
 31{
 32	rcu_read_lock();
 33	if (t->mode == NO_PREEMPT)
 34		preempt_disable();
 35	else
 36		migrate_disable();
 37
 38	t->time_start = ktime_get_ns();
 39}
 40
 41static void bpf_test_timer_leave(struct bpf_test_timer *t)
 42	__releases(rcu)
 43{
 44	t->time_start = 0;
 45
 46	if (t->mode == NO_PREEMPT)
 47		preempt_enable();
 48	else
 49		migrate_enable();
 50	rcu_read_unlock();
 51}
 52
 53static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
 54	__must_hold(rcu)
 55{
 56	t->i++;
 57	if (t->i >= repeat) {
 58		/* We're done. */
 59		t->time_spent += ktime_get_ns() - t->time_start;
 60		do_div(t->time_spent, t->i);
 61		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
 62		*err = 0;
 63		goto reset;
 64	}
 65
 66	if (signal_pending(current)) {
 67		/* During iteration: we've been cancelled, abort. */
 68		*err = -EINTR;
 69		goto reset;
 70	}
 71
 72	if (need_resched()) {
 73		/* During iteration: we need to reschedule between runs. */
 74		t->time_spent += ktime_get_ns() - t->time_start;
 75		bpf_test_timer_leave(t);
 76		cond_resched();
 77		bpf_test_timer_enter(t);
 78	}
 79
 80	/* Do another round. */
 81	return true;
 82
 83reset:
 84	t->i = 0;
 85	return false;
 86}
 87
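The three timer helpers above form the measurement protocol used by every test runner in this file: enter pins the task (RCU read lock plus preempt- or migrate-disable), continue accounts elapsed time and decides whether to loop again (handling pending signals and voluntary rescheduling), and leave undoes the pinning. A minimal sketch of the calling pattern, with the program invocation left as a placeholder (run_prog_once() is hypothetical):

	struct bpf_test_timer t = { NO_MIGRATE };
	u32 repeat = 1000, duration = 0;
	int err;

	bpf_test_timer_enter(&t);
	do {
		run_prog_once();	/* placeholder for bpf_prog_run_xdp()/BPF_PROG_RUN() */
	} while (bpf_test_timer_continue(&t, repeat, &err, &duration));
	bpf_test_timer_leave(&t);
	/* on success err == 0 and duration is the mean nanoseconds per run */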
 88static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 89			u32 *retval, u32 *time, bool xdp)
 90{
 91	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
 92	struct bpf_test_timer t = { NO_MIGRATE };
 93	enum bpf_cgroup_storage_type stype;
 94	int ret;
 95
 96	for_each_cgroup_storage_type(stype) {
 97		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
 98		if (IS_ERR(storage[stype])) {
 99			storage[stype] = NULL;
100			for_each_cgroup_storage_type(stype)
101				bpf_cgroup_storage_free(storage[stype]);
102			return -ENOMEM;
103		}
104	}
105
106	if (!repeat)
107		repeat = 1;
108
109	bpf_test_timer_enter(&t);
110	do {
111		ret = bpf_cgroup_storage_set(storage);
112		if (ret)
113			break;
114
115		if (xdp)
116			*retval = bpf_prog_run_xdp(prog, ctx);
117		else
118			*retval = BPF_PROG_RUN(prog, ctx);
119
120		bpf_cgroup_storage_unset();
121	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
122	bpf_test_timer_leave(&t);
123
124	for_each_cgroup_storage_type(stype)
125		bpf_cgroup_storage_free(storage[stype]);
126
127	return ret;
128}
129
130static int bpf_test_finish(const union bpf_attr *kattr,
131			   union bpf_attr __user *uattr, const void *data,
132			   u32 size, u32 retval, u32 duration)
133{
134	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
135	int err = -EFAULT;
136	u32 copy_size = size;
137
138	/* Clamp copy if the user has provided a size hint, but copy the full
139	 * buffer if not to retain old behaviour.
140	 */
141	if (kattr->test.data_size_out &&
142	    copy_size > kattr->test.data_size_out) {
143		copy_size = kattr->test.data_size_out;
144		err = -ENOSPC;
145	}
146
147	if (data_out && copy_to_user(data_out, data, copy_size))
148		goto out;
149	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
150		goto out;
151	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
152		goto out;
153	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
154		goto out;
155	if (err != -ENOSPC)
156		err = 0;
157out:
158	trace_bpf_test_finish(&err);
159	return err;
160}
161
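bpf_test_finish() defines the user-visible output contract of BPF_PROG_TEST_RUN: the resulting packet is copied to data_out (clamped to the data_size_out hint, with -ENOSPC signalling truncation), and data_size_out, retval and duration are always written back. A minimal userspace sketch using the raw bpf(2) syscall; the helper name and the caller-supplied buffers are illustrative, not part of this file:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int test_run_once(int prog_fd, void *pkt_in, __u32 len_in,
			 void *pkt_out, __u32 out_room)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (unsigned long)pkt_in;
	attr.test.data_size_in = len_in;
	attr.test.data_out = (unsigned long)pkt_out;
	attr.test.data_size_out = out_room;	/* size hint; the kernel clamps the copy to it */
	attr.test.repeat = 1;

	/* Fails with ENOSPC if the run succeeded but the output was truncated;
	 * attr.test.data_size_out is overwritten with the full packet length,
	 * attr.test.retval and attr.test.duration with the program's result.
	 */
	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}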
162/* Integer types of various sizes and pointer combinations cover a variety of
163 * architecture-dependent calling conventions. 7+ can be supported in the
164 * future.
165 */
166__diag_push();
167__diag_ignore(GCC, 8, "-Wmissing-prototypes",
168	      "Global functions as their definitions will be in vmlinux BTF");
169int noinline bpf_fentry_test1(int a)
170{
171	return a + 1;
172}
173
174int noinline bpf_fentry_test2(int a, u64 b)
175{
176	return a + b;
177}
178
179int noinline bpf_fentry_test3(char a, int b, u64 c)
180{
181	return a + b + c;
182}
183
184int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
185{
186	return (long)a + b + c + d;
187}
188
189int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
190{
191	return a + (long)b + c + d + e;
192}
193
194int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
195{
196	return a + (long)b + c + d + (long)e + f;
197}
198
199struct bpf_fentry_test_t {
200	struct bpf_fentry_test_t *a;
201};
202
203int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
204{
205	return (long)arg;
206}
207
208int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
209{
210	return (long)arg->a;
211}
212
213int noinline bpf_modify_return_test(int a, int *b)
214{
215	*b += 1;
216	return a + *b;
217}
218
219u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
220{
221	return a + b + c + d;
222}
223
224int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
225{
226	return a + b;
227}
228
229struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
230{
231	return sk;
232}
233
234__diag_pop();
235
236ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
237
238BTF_SET_START(test_sk_kfunc_ids)
239BTF_ID(func, bpf_kfunc_call_test1)
240BTF_ID(func, bpf_kfunc_call_test2)
241BTF_ID(func, bpf_kfunc_call_test3)
242BTF_SET_END(test_sk_kfunc_ids)
243
244bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
245{
246	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
247}
248
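The BTF ID set and bpf_prog_test_check_kfunc_call() above are what allow test programs to call these kernel functions directly as kfuncs. On the BPF side the call site is an extern ksym declaration; a hedged sketch of a tc-style caller (built with clang -target bpf, libbpf conventions assumed, not part of this file):

#include "vmlinux.h"			/* or <linux/bpf.h> plus the needed type defs */
#include <bpf/bpf_helpers.h>

extern u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) __ksym;

SEC("tc")
int kfunc_caller(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;

	if (!sk)
		return -1;
	sk = bpf_sk_fullsock(sk);	/* the kfunc expects a full struct sock pointer */
	if (!sk)
		return -1;

	/* resolved through test_sk_kfunc_ids above at verification time */
	return (int)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
}

char _license[] SEC("license") = "GPL";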
249static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
250			   u32 headroom, u32 tailroom)
251{
252	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
253	u32 user_size = kattr->test.data_size_in;
254	void *data;
255
256	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
257		return ERR_PTR(-EINVAL);
258
259	if (user_size > size)
260		return ERR_PTR(-EMSGSIZE);
261
262	data = kzalloc(size + headroom + tailroom, GFP_USER);
263	if (!data)
264		return ERR_PTR(-ENOMEM);
265
266	if (copy_from_user(data + headroom, data_in, user_size)) {
267		kfree(data);
268		return ERR_PTR(-EFAULT);
269	}
270
271	return data;
272}
273
274int bpf_prog_test_run_tracing(struct bpf_prog *prog,
275			      const union bpf_attr *kattr,
276			      union bpf_attr __user *uattr)
277{
278	struct bpf_fentry_test_t arg = {};
279	u16 side_effect = 0, ret = 0;
280	int b = 2, err = -EFAULT;
281	u32 retval = 0;
282
283	if (kattr->test.flags || kattr->test.cpu)
284		return -EINVAL;
285
286	switch (prog->expected_attach_type) {
287	case BPF_TRACE_FENTRY:
288	case BPF_TRACE_FEXIT:
289		if (bpf_fentry_test1(1) != 2 ||
290		    bpf_fentry_test2(2, 3) != 5 ||
291		    bpf_fentry_test3(4, 5, 6) != 15 ||
292		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
293		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
294		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
295		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
296		    bpf_fentry_test8(&arg) != 0)
297			goto out;
298		break;
299	case BPF_MODIFY_RETURN:
300		ret = bpf_modify_return_test(1, &b);
301		if (b != 2)
302			side_effect = 1;
303		break;
304	default:
305		goto out;
306	}
307
308	retval = ((u32)side_effect << 16) | ret;
309	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
310		goto out;
311
312	err = 0;
313out:
314	trace_bpf_test_finish(&err);
315	return err;
316}
317
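For BPF_MODIFY_RETURN the runner packs two results into the single retval reported to userspace: the low 16 bits carry the value returned by bpf_modify_return_test() and bit 16 records whether the original function body actually ran (its side effect on *b was observed). A small decoding sketch for the userspace side:

static void decode_tracing_retval(__u32 retval, __u16 *fn_ret, __u16 *side_effect)
{
	*fn_ret = retval & 0xffff;	/* return value of bpf_modify_return_test() */
	*side_effect = retval >> 16;	/* 1 if the original body ran and bumped *b */
}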
318struct bpf_raw_tp_test_run_info {
319	struct bpf_prog *prog;
320	void *ctx;
321	u32 retval;
322};
323
324static void
325__bpf_prog_test_run_raw_tp(void *data)
326{
327	struct bpf_raw_tp_test_run_info *info = data;
328
329	rcu_read_lock();
330	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
331	rcu_read_unlock();
332}
333
334int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
335			     const union bpf_attr *kattr,
336			     union bpf_attr __user *uattr)
337{
338	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
339	__u32 ctx_size_in = kattr->test.ctx_size_in;
340	struct bpf_raw_tp_test_run_info info;
341	int cpu = kattr->test.cpu, err = 0;
342	int current_cpu;
343
344	/* doesn't support data_in/out, ctx_out, duration, or repeat */
345	if (kattr->test.data_in || kattr->test.data_out ||
346	    kattr->test.ctx_out || kattr->test.duration ||
347	    kattr->test.repeat)
348		return -EINVAL;
349
350	if (ctx_size_in < prog->aux->max_ctx_offset ||
351	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
352		return -EINVAL;
353
354	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
355		return -EINVAL;
356
357	if (ctx_size_in) {
358		info.ctx = kzalloc(ctx_size_in, GFP_USER);
359		if (!info.ctx)
360			return -ENOMEM;
361		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
362			err = -EFAULT;
363			goto out;
364		}
365	} else {
366		info.ctx = NULL;
367	}
368
369	info.prog = prog;
370
371	current_cpu = get_cpu();
372	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
373	    cpu == current_cpu) {
374		__bpf_prog_test_run_raw_tp(&info);
375	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
376		/* smp_call_function_single() also checks cpu_online()
377		 * after csd_lock(). However, since cpu is from user
378		 * space, let's do an extra quick check to filter out
379		 * invalid value before smp_call_function_single().
380		 */
381		err = -ENXIO;
382	} else {
383		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
384					       &info, 1);
385	}
386	put_cpu();
387
388	if (!err &&
389	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
390		err = -EFAULT;
391
392out:
393	kfree(info.ctx);
394	return err;
395}
396
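Raw tracepoint test runs can be pinned to a chosen CPU: with BPF_F_TEST_RUN_ON_CPU set in test.flags the program executes on test.cpu via smp_call_function_single(), otherwise it runs on the current CPU and cpu must be 0. A hedged userspace sketch of the relevant attr fields (same headers as the earlier sketch; the fd and buffer names are illustrative):

static int run_raw_tp_on_cpu(int raw_tp_prog_fd, void *ctx_buf, __u32 ctx_len, int cpu)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = raw_tp_prog_fd;
	attr.test.ctx_in = (unsigned long)ctx_buf;	/* the tracepoint's u64 argument slots */
	attr.test.ctx_size_in = ctx_len;		/* must cover prog->aux->max_ctx_offset */
	attr.test.flags = BPF_F_TEST_RUN_ON_CPU;
	attr.test.cpu = cpu;				/* offline or out-of-range cpu => -ENXIO */

	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}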
397static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
398{
399	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
400	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
401	u32 size = kattr->test.ctx_size_in;
402	void *data;
403	int err;
404
405	if (!data_in && !data_out)
406		return NULL;
407
408	data = kzalloc(max_size, GFP_USER);
409	if (!data)
410		return ERR_PTR(-ENOMEM);
411
412	if (data_in) {
413		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
414		if (err) {
415			kfree(data);
416			return ERR_PTR(err);
417		}
418
419		size = min_t(u32, max_size, size);
420		if (copy_from_user(data, data_in, size)) {
421			kfree(data);
422			return ERR_PTR(-EFAULT);
423		}
424	}
425	return data;
426}
427
428static int bpf_ctx_finish(const union bpf_attr *kattr,
429			  union bpf_attr __user *uattr, const void *data,
430			  u32 size)
431{
432	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
433	int err = -EFAULT;
434	u32 copy_size = size;
435
436	if (!data || !data_out)
437		return 0;
438
439	if (copy_size > kattr->test.ctx_size_out) {
440		copy_size = kattr->test.ctx_size_out;
441		err = -ENOSPC;
442	}
443
444	if (copy_to_user(data_out, data, copy_size))
445		goto out;
446	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
447		goto out;
448	if (err != -ENOSPC)
449		err = 0;
450out:
451	return err;
452}
453
454/**
455 * range_is_zero - test whether buffer is initialized
456 * @buf: buffer to check
457 * @from: check from this position
458 * @to: check up until (excluding) this position
459 *
460 * This function returns true if there is no non-zero byte
461 * in buf in the range [from, to).
462 */
463static inline bool range_is_zero(void *buf, size_t from, size_t to)
464{
465	return !memchr_inv((u8 *)buf + from, 0, to - from);
466}
467
468static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
469{
470	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
471
472	if (!__skb)
473		return 0;
474
475	/* make sure the fields we don't use are zeroed */
476	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
477		return -EINVAL;
478
479	/* mark is allowed */
480
481	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
482			   offsetof(struct __sk_buff, priority)))
483		return -EINVAL;
484
485	/* priority is allowed */
486
487	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
488			   offsetof(struct __sk_buff, ifindex)))
489		return -EINVAL;
490
491	/* ifindex is allowed */
492
493	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
494			   offsetof(struct __sk_buff, cb)))
495		return -EINVAL;
496
497	/* cb is allowed */
498
499	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
500			   offsetof(struct __sk_buff, tstamp)))
501		return -EINVAL;
502
503	/* tstamp is allowed */
504	/* wire_len is allowed */
505	/* gso_segs is allowed */
506
507	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
508			   offsetof(struct __sk_buff, gso_size)))
509		return -EINVAL;
510
511	/* gso_size is allowed */
512
513	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
514			   sizeof(struct __sk_buff)))
515		return -EINVAL;
516
517	skb->mark = __skb->mark;
518	skb->priority = __skb->priority;
519	skb->tstamp = __skb->tstamp;
520	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
521
522	if (__skb->wire_len == 0) {
523		cb->pkt_len = skb->len;
524	} else {
525		if (__skb->wire_len < skb->len ||
526		    __skb->wire_len > GSO_MAX_SIZE)
527			return -EINVAL;
528		cb->pkt_len = __skb->wire_len;
529	}
530
531	if (__skb->gso_segs > GSO_MAX_SEGS)
532		return -EINVAL;
533	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
534	skb_shinfo(skb)->gso_size = __skb->gso_size;
535
536	return 0;
537}
538
539static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
540{
541	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
542
543	if (!__skb)
544		return;
545
546	__skb->mark = skb->mark;
547	__skb->priority = skb->priority;
548	__skb->ifindex = skb->dev->ifindex;
549	__skb->tstamp = skb->tstamp;
550	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
551	__skb->wire_len = cb->pkt_len;
552	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
553}
554
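convert___skb_to_skb() and convert_skb_to___skb() define the __sk_buff fields a test may pass in (mark, priority, ifindex, cb, tstamp, wire_len, gso_segs, gso_size) and which are mirrored back; every other byte of the context must be zero or the run fails with -EINVAL. A hedged fragment showing a valid ctx_in for bpf_prog_test_run_skb(), reusing the 'attr' from the earlier sketches:

	struct __sk_buff skb_ctx;			/* UAPI struct from <linux/bpf.h> */

	memset(&skb_ctx, 0, sizeof(skb_ctx));		/* unused fields must stay zero */
	skb_ctx.mark = 42;
	skb_ctx.priority = 1;
	skb_ctx.cb[0] = 0xdeadbeef;			/* copied into qdisc_skb_cb->data */
	skb_ctx.tstamp = 1000;				/* copied to skb->tstamp */
							/* wire_len == 0 means "use skb->len" */

	attr.test.ctx_in = (unsigned long)&skb_ctx;
	attr.test.ctx_size_in = sizeof(skb_ctx);
	attr.test.ctx_out = (unsigned long)&skb_ctx;	/* mark/priority/ifindex etc. mirrored back */
	attr.test.ctx_size_out = sizeof(skb_ctx);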
555static struct proto bpf_dummy_proto = {
556	.name   = "bpf_dummy",
557	.owner  = THIS_MODULE,
558	.obj_size = sizeof(struct sock),
559};
560
561int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
562			  union bpf_attr __user *uattr)
563{
564	bool is_l2 = false, is_direct_pkt_access = false;
565	struct net *net = current->nsproxy->net_ns;
566	struct net_device *dev = net->loopback_dev;
567	u32 size = kattr->test.data_size_in;
568	u32 repeat = kattr->test.repeat;
569	struct __sk_buff *ctx = NULL;
570	u32 retval, duration;
571	int hh_len = ETH_HLEN;
572	struct sk_buff *skb;
573	struct sock *sk;
574	void *data;
575	int ret;
576
577	if (kattr->test.flags || kattr->test.cpu)
578		return -EINVAL;
579
580	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
581			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
582	if (IS_ERR(data))
583		return PTR_ERR(data);
584
585	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
586	if (IS_ERR(ctx)) {
587		kfree(data);
588		return PTR_ERR(ctx);
589	}
590
591	switch (prog->type) {
592	case BPF_PROG_TYPE_SCHED_CLS:
593	case BPF_PROG_TYPE_SCHED_ACT:
594		is_l2 = true;
595		fallthrough;
596	case BPF_PROG_TYPE_LWT_IN:
597	case BPF_PROG_TYPE_LWT_OUT:
598	case BPF_PROG_TYPE_LWT_XMIT:
599		is_direct_pkt_access = true;
600		break;
601	default:
602		break;
603	}
604
605	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
606	if (!sk) {
607		kfree(data);
608		kfree(ctx);
609		return -ENOMEM;
610	}
611	sock_init_data(NULL, sk);
612
613	skb = build_skb(data, 0);
614	if (!skb) {
615		kfree(data);
616		kfree(ctx);
617		sk_free(sk);
618		return -ENOMEM;
619	}
620	skb->sk = sk;
621
622	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
623	__skb_put(skb, size);
624	if (ctx && ctx->ifindex > 1) {
625		dev = dev_get_by_index(net, ctx->ifindex);
626		if (!dev) {
627			ret = -ENODEV;
628			goto out;
629		}
630	}
631	skb->protocol = eth_type_trans(skb, dev);
632	skb_reset_network_header(skb);
633
634	switch (skb->protocol) {
635	case htons(ETH_P_IP):
636		sk->sk_family = AF_INET;
637		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
638			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
639			sk->sk_daddr = ip_hdr(skb)->daddr;
640		}
641		break;
642#if IS_ENABLED(CONFIG_IPV6)
643	case htons(ETH_P_IPV6):
644		sk->sk_family = AF_INET6;
645		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
646			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
647			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
648		}
649		break;
650#endif
651	default:
652		break;
653	}
654
655	if (is_l2)
656		__skb_push(skb, hh_len);
657	if (is_direct_pkt_access)
658		bpf_compute_data_pointers(skb);
659	ret = convert___skb_to_skb(skb, ctx);
660	if (ret)
661		goto out;
662	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
663	if (ret)
664		goto out;
665	if (!is_l2) {
666		if (skb_headroom(skb) < hh_len) {
667			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
668
669			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
670				ret = -ENOMEM;
671				goto out;
672			}
673		}
674		memset(__skb_push(skb, hh_len), 0, hh_len);
675	}
676	convert_skb_to___skb(skb, ctx);
677
678	size = skb->len;
679	/* bpf program can never convert linear skb to non-linear */
680	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
681		size = skb_headlen(skb);
682	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
683	if (!ret)
684		ret = bpf_ctx_finish(kattr, uattr, ctx,
685				     sizeof(struct __sk_buff));
686out:
687	if (dev && dev != net->loopback_dev)
688		dev_put(dev);
689	kfree_skb(skb);
690	sk_free(sk);
691	kfree(ctx);
692	return ret;
693}
694
695int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
696			  union bpf_attr __user *uattr)
697{
698	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
699	u32 headroom = XDP_PACKET_HEADROOM;
700	u32 size = kattr->test.data_size_in;
701	u32 repeat = kattr->test.repeat;
702	struct netdev_rx_queue *rxqueue;
703	struct xdp_buff xdp = {};
704	u32 retval, duration;
705	u32 max_data_sz;
706	void *data;
707	int ret;
708
709	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
710	    prog->expected_attach_type == BPF_XDP_CPUMAP)
711		return -EINVAL;
712	if (kattr->test.ctx_in || kattr->test.ctx_out)
713		return -EINVAL;
714
715	/* XDP has extra tailroom as (most) drivers use a full page */
716	max_data_sz = 4096 - headroom - tailroom;
717
718	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
719	if (IS_ERR(data))
720		return PTR_ERR(data);
721
722	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
723	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
724		      &rxqueue->xdp_rxq);
725	xdp_prepare_buff(&xdp, data, headroom, size, true);
726
727	bpf_prog_change_xdp(NULL, prog);
728	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
729	if (ret)
730		goto out;
731	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
732		size = xdp.data_end - xdp.data;
733	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
734out:
735	bpf_prog_change_xdp(prog, NULL);
736	kfree(data);
737	return ret;
738}
739
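Because the XDP runner backs the frame with a single page, the usable payload is bounded by XDP_PACKET_HEADROOM in front and skb_shared_info-sized tailroom at the back, and bpf_test_init() rejects larger data_size_in with -EMSGSIZE. A small helper mirroring the kernel arithmetic above; the 4096 page size and the 320-byte shared-info size are assumptions about a typical 64-bit system:

static unsigned int xdp_test_max_data_size(void)
{
	unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned int headroom = 256;	/* XDP_PACKET_HEADROOM */
	unsigned int tailroom = 320;	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

	return page_size - headroom - tailroom;	/* largest accepted data_size_in */
}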
740static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
741{
742	/* make sure the fields we don't use are zeroed */
743	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
744		return -EINVAL;
745
746	/* flags is allowed */
747
748	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
749			   sizeof(struct bpf_flow_keys)))
750		return -EINVAL;
751
752	return 0;
753}
754
755int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
756				     const union bpf_attr *kattr,
757				     union bpf_attr __user *uattr)
758{
759	struct bpf_test_timer t = { NO_PREEMPT };
760	u32 size = kattr->test.data_size_in;
761	struct bpf_flow_dissector ctx = {};
762	u32 repeat = kattr->test.repeat;
763	struct bpf_flow_keys *user_ctx;
764	struct bpf_flow_keys flow_keys;
765	const struct ethhdr *eth;
766	unsigned int flags = 0;
767	u32 retval, duration;
768	void *data;
769	int ret;
770
771	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
772		return -EINVAL;
773
774	if (kattr->test.flags || kattr->test.cpu)
775		return -EINVAL;
776
777	if (size < ETH_HLEN)
778		return -EINVAL;
779
780	data = bpf_test_init(kattr, size, 0, 0);
781	if (IS_ERR(data))
782		return PTR_ERR(data);
783
784	eth = (struct ethhdr *)data;
785
786	if (!repeat)
787		repeat = 1;
788
789	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
790	if (IS_ERR(user_ctx)) {
791		kfree(data);
792		return PTR_ERR(user_ctx);
793	}
794	if (user_ctx) {
795		ret = verify_user_bpf_flow_keys(user_ctx);
796		if (ret)
797			goto out;
798		flags = user_ctx->flags;
799	}
800
801	ctx.flow_keys = &flow_keys;
802	ctx.data = data;
803	ctx.data_end = (__u8 *)data + size;
804
805	bpf_test_timer_enter(&t);
806	do {
807		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
808					  size, flags);
809	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
810	bpf_test_timer_leave(&t);
811
812	if (ret < 0)
813		goto out;
814
815	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
816			      retval, duration);
817	if (!ret)
818		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
819				     sizeof(struct bpf_flow_keys));
820
821out:
822	kfree(user_ctx);
823	kfree(data);
824	return ret;
825}
826
827int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
828				union bpf_attr __user *uattr)
829{
830	struct bpf_test_timer t = { NO_PREEMPT };
831	struct bpf_prog_array *progs = NULL;
832	struct bpf_sk_lookup_kern ctx = {};
833	u32 repeat = kattr->test.repeat;
834	struct bpf_sk_lookup *user_ctx;
835	u32 retval, duration;
836	int ret = -EINVAL;
837
838	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
839		return -EINVAL;
840
841	if (kattr->test.flags || kattr->test.cpu)
842		return -EINVAL;
843
844	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
845	    kattr->test.data_size_out)
846		return -EINVAL;
847
848	if (!repeat)
849		repeat = 1;
850
851	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
852	if (IS_ERR(user_ctx))
853		return PTR_ERR(user_ctx);
854
855	if (!user_ctx)
856		return -EINVAL;
857
858	if (user_ctx->sk)
859		goto out;
860
861	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
862		goto out;
863
864	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
865		ret = -ERANGE;
866		goto out;
867	}
868
869	ctx.family = (u16)user_ctx->family;
870	ctx.protocol = (u16)user_ctx->protocol;
871	ctx.dport = (u16)user_ctx->local_port;
872	ctx.sport = (__force __be16)user_ctx->remote_port;
873
874	switch (ctx.family) {
875	case AF_INET:
876		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
877		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
878		break;
879
880#if IS_ENABLED(CONFIG_IPV6)
881	case AF_INET6:
882		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
883		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
884		break;
885#endif
886
887	default:
888		ret = -EAFNOSUPPORT;
889		goto out;
890	}
891
892	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
893	if (!progs) {
894		ret = -ENOMEM;
895		goto out;
896	}
897
898	progs->items[0].prog = prog;
899
900	bpf_test_timer_enter(&t);
901	do {
902		ctx.selected_sk = NULL;
903		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
904	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
905	bpf_test_timer_leave(&t);
906
907	if (ret < 0)
908		goto out;
909
910	user_ctx->cookie = 0;
911	if (ctx.selected_sk) {
912		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
913			ret = -EOPNOTSUPP;
914			goto out;
915		}
916
917		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
918	}
919
920	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
921	if (!ret)
922		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
923
924out:
925	bpf_prog_array_free(progs);
926	kfree(user_ctx);
927	return ret;
928}
929
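The sk_lookup runner takes its whole input from ctx_in: family, protocol and the local/remote address and port fields seed a bpf_sk_lookup_kern context, sk must be zero on input, and the cookie of whichever socket the program selects is written back. A hedged IPv4 fragment (byte-order comments follow the UAPI documentation of struct bpf_sk_lookup; 'attr' as in the earlier sketches):

	struct bpf_sk_lookup lookup_ctx;

	memset(&lookup_ctx, 0, sizeof(lookup_ctx));	/* bytes past local_port must be zero */
	lookup_ctx.family = AF_INET;
	lookup_ctx.protocol = IPPROTO_TCP;
	lookup_ctx.remote_ip4 = htonl(0x7f000001);	/* 127.0.0.1, network byte order */
	lookup_ctx.remote_port = htons(12345);		/* network byte order, value <= U16_MAX */
	lookup_ctx.local_ip4 = htonl(0x7f000001);
	lookup_ctx.local_port = 8080;			/* host byte order */

	attr.test.ctx_in = (unsigned long)&lookup_ctx;
	attr.test.ctx_size_in = sizeof(lookup_ctx);
	attr.test.ctx_out = (unsigned long)&lookup_ctx;	/* cookie of the selected socket comes back */
	attr.test.ctx_size_out = sizeof(lookup_ctx);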
930int bpf_prog_test_run_syscall(struct bpf_prog *prog,
931			      const union bpf_attr *kattr,
932			      union bpf_attr __user *uattr)
933{
934	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
935	__u32 ctx_size_in = kattr->test.ctx_size_in;
936	void *ctx = NULL;
937	u32 retval;
938	int err = 0;
939
940	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
941	if (kattr->test.data_in || kattr->test.data_out ||
942	    kattr->test.ctx_out || kattr->test.duration ||
943	    kattr->test.repeat || kattr->test.flags)
944		return -EINVAL;
945
946	if (ctx_size_in < prog->aux->max_ctx_offset ||
947	    ctx_size_in > U16_MAX)
948		return -EINVAL;
949
950	if (ctx_size_in) {
951		ctx = kzalloc(ctx_size_in, GFP_USER);
952		if (!ctx)
953			return -ENOMEM;
954		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
955			err = -EFAULT;
956			goto out;
957		}
958	}
959
960	rcu_read_lock_trace();
961	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
962	rcu_read_unlock_trace();
963
964	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
965		err = -EFAULT;
966		goto out;
967	}
968	if (ctx_size_in)
969		if (copy_to_user(ctx_in, ctx, ctx_size_in))
970			err = -EFAULT;
971out:
972	kfree(ctx);
973	return err;
974}
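Unlike the other runners, the syscall-program runner treats ctx_in as a scratch buffer the program may modify: after the run the updated context is copied back to the same user address, so out-parameters come back without a separate ctx_out buffer. A hedged fragment; the argument struct and fd names are hypothetical, and 'attr' is as in the earlier sketches:

	struct my_syscall_args {	/* hypothetical layout shared with the BPF program */
		__u64 in;
		__u64 out;		/* written by the program */
	} args = { .in = 42 };

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = syscall_prog_fd;
	attr.test.ctx_in = (unsigned long)&args;
	attr.test.ctx_size_in = sizeof(args);	/* <= U16_MAX, >= the program's max ctx offset */

	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	/* on success attr.test.retval holds the program's return value and
	 * args.out holds whatever the program wrote into its context
	 */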
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2017 Facebook
   3 */
   4#include <linux/bpf.h>
   5#include <linux/btf.h>
   6#include <linux/btf_ids.h>
   7#include <linux/slab.h>
   8#include <linux/init.h>
   9#include <linux/vmalloc.h>
  10#include <linux/etherdevice.h>
  11#include <linux/filter.h>
  12#include <linux/rcupdate_trace.h>
  13#include <linux/sched/signal.h>
  14#include <net/bpf_sk_storage.h>
  15#include <net/sock.h>
  16#include <net/tcp.h>
  17#include <net/net_namespace.h>
  18#include <net/page_pool.h>
  19#include <linux/error-injection.h>
  20#include <linux/smp.h>
  21#include <linux/sock_diag.h>
  22#include <net/xdp.h>
  23
  24#define CREATE_TRACE_POINTS
  25#include <trace/events/bpf_test_run.h>
  26
  27struct bpf_test_timer {
  28	enum { NO_PREEMPT, NO_MIGRATE } mode;
  29	u32 i;
  30	u64 time_start, time_spent;
  31};
  32
  33static void bpf_test_timer_enter(struct bpf_test_timer *t)
  34	__acquires(rcu)
  35{
  36	rcu_read_lock();
  37	if (t->mode == NO_PREEMPT)
  38		preempt_disable();
  39	else
  40		migrate_disable();
  41
  42	t->time_start = ktime_get_ns();
  43}
  44
  45static void bpf_test_timer_leave(struct bpf_test_timer *t)
  46	__releases(rcu)
  47{
  48	t->time_start = 0;
  49
  50	if (t->mode == NO_PREEMPT)
  51		preempt_enable();
  52	else
  53		migrate_enable();
  54	rcu_read_unlock();
  55}
  56
  57static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
  58				    u32 repeat, int *err, u32 *duration)
  59	__must_hold(rcu)
  60{
  61	t->i += iterations;
  62	if (t->i >= repeat) {
  63		/* We're done. */
  64		t->time_spent += ktime_get_ns() - t->time_start;
  65		do_div(t->time_spent, t->i);
  66		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
  67		*err = 0;
  68		goto reset;
  69	}
  70
  71	if (signal_pending(current)) {
  72		/* During iteration: we've been cancelled, abort. */
  73		*err = -EINTR;
  74		goto reset;
  75	}
  76
  77	if (need_resched()) {
  78		/* During iteration: we need to reschedule between runs. */
  79		t->time_spent += ktime_get_ns() - t->time_start;
  80		bpf_test_timer_leave(t);
  81		cond_resched();
  82		bpf_test_timer_enter(t);
  83	}
  84
  85	/* Do another round. */
  86	return true;
  87
  88reset:
  89	t->i = 0;
  90	return false;
  91}
  92
  93/* We put this struct at the head of each page with a context and frame
  94 * initialised when the page is allocated, so we don't have to do this on each
  95 * repetition of the test run.
  96 */
  97struct xdp_page_head {
  98	struct xdp_buff orig_ctx;
  99	struct xdp_buff ctx;
 100	struct xdp_frame frm;
 101	u8 data[];
 102};
 103
 104struct xdp_test_data {
 105	struct xdp_buff *orig_ctx;
 106	struct xdp_rxq_info rxq;
 107	struct net_device *dev;
 108	struct page_pool *pp;
 109	struct xdp_frame **frames;
 110	struct sk_buff **skbs;
 111	struct xdp_mem_info mem;
 112	u32 batch_size;
 113	u32 frame_cnt;
 114};
 115
 116#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
 117#define TEST_XDP_MAX_BATCH 256
 118
 119static void xdp_test_run_init_page(struct page *page, void *arg)
 120{
 121	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
 122	struct xdp_buff *new_ctx, *orig_ctx;
 123	u32 headroom = XDP_PACKET_HEADROOM;
 124	struct xdp_test_data *xdp = arg;
 125	size_t frm_len, meta_len;
 126	struct xdp_frame *frm;
 127	void *data;
 128
 129	orig_ctx = xdp->orig_ctx;
 130	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
 131	meta_len = orig_ctx->data - orig_ctx->data_meta;
 132	headroom -= meta_len;
 133
 134	new_ctx = &head->ctx;
 135	frm = &head->frm;
 136	data = &head->data;
 137	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
 138
 139	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
 140	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
 141	new_ctx->data = new_ctx->data_meta + meta_len;
 142
 143	xdp_update_frame_from_buff(new_ctx, frm);
 144	frm->mem = new_ctx->rxq->mem;
 145
 146	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
 147}
 148
 149static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 150{
 151	struct page_pool *pp;
 152	int err = -ENOMEM;
 153	struct page_pool_params pp_params = {
 154		.order = 0,
 155		.flags = 0,
 156		.pool_size = xdp->batch_size,
 157		.nid = NUMA_NO_NODE,
 158		.init_callback = xdp_test_run_init_page,
 159		.init_arg = xdp,
 160	};
 161
 162	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
 163	if (!xdp->frames)
 164		return -ENOMEM;
 165
 166	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
 167	if (!xdp->skbs)
 168		goto err_skbs;
 169
 170	pp = page_pool_create(&pp_params);
 171	if (IS_ERR(pp)) {
 172		err = PTR_ERR(pp);
 173		goto err_pp;
 174	}
 175
 176	/* will copy 'mem.id' into pp->xdp_mem_id */
 177	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
 178	if (err)
 179		goto err_mmodel;
 180
 181	xdp->pp = pp;
 182
 183	/* We create a 'fake' RXQ referencing the original dev, but with an
 184	 * xdp_mem_info pointing to our page_pool
 185	 */
 186	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
 187	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
 188	xdp->rxq.mem.id = pp->xdp_mem_id;
 189	xdp->dev = orig_ctx->rxq->dev;
 190	xdp->orig_ctx = orig_ctx;
 191
 192	return 0;
 193
 194err_mmodel:
 195	page_pool_destroy(pp);
 196err_pp:
 197	kvfree(xdp->skbs);
 198err_skbs:
 199	kvfree(xdp->frames);
 200	return err;
 201}
 202
 203static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 204{
 205	xdp_unreg_mem_model(&xdp->mem);
 206	page_pool_destroy(xdp->pp);
 207	kfree(xdp->frames);
 208	kfree(xdp->skbs);
 209}
 210
 211static bool ctx_was_changed(struct xdp_page_head *head)
 212{
 213	return head->orig_ctx.data != head->ctx.data ||
 214		head->orig_ctx.data_meta != head->ctx.data_meta ||
 215		head->orig_ctx.data_end != head->ctx.data_end;
 216}
 217
 218static void reset_ctx(struct xdp_page_head *head)
 219{
 220	if (likely(!ctx_was_changed(head)))
 221		return;
 222
 223	head->ctx.data = head->orig_ctx.data;
 224	head->ctx.data_meta = head->orig_ctx.data_meta;
 225	head->ctx.data_end = head->orig_ctx.data_end;
 226	xdp_update_frame_from_buff(&head->ctx, &head->frm);
 227}
 228
 229static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
 230			   struct sk_buff **skbs,
 231			   struct net_device *dev)
 232{
 233	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
 234	int i, n;
 235	LIST_HEAD(list);
 236
 237	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
 238	if (unlikely(n == 0)) {
 239		for (i = 0; i < nframes; i++)
 240			xdp_return_frame(frames[i]);
 241		return -ENOMEM;
 242	}
 243
 244	for (i = 0; i < nframes; i++) {
 245		struct xdp_frame *xdpf = frames[i];
 246		struct sk_buff *skb = skbs[i];
 247
 248		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
 249		if (!skb) {
 250			xdp_return_frame(xdpf);
 251			continue;
 252		}
 253
 254		list_add_tail(&skb->list, &list);
 255	}
 256	netif_receive_skb_list(&list);
 257
 258	return 0;
 259}
 260
 261static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
 262			      u32 repeat)
 263{
 264	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 265	int err = 0, act, ret, i, nframes = 0, batch_sz;
 266	struct xdp_frame **frames = xdp->frames;
 267	struct xdp_page_head *head;
 268	struct xdp_frame *frm;
 269	bool redirect = false;
 270	struct xdp_buff *ctx;
 271	struct page *page;
 272
 273	batch_sz = min_t(u32, repeat, xdp->batch_size);
 274
 275	local_bh_disable();
 276	xdp_set_return_frame_no_direct();
 277
 278	for (i = 0; i < batch_sz; i++) {
 279		page = page_pool_dev_alloc_pages(xdp->pp);
 280		if (!page) {
 281			err = -ENOMEM;
 282			goto out;
 283		}
 284
 285		head = phys_to_virt(page_to_phys(page));
 286		reset_ctx(head);
 287		ctx = &head->ctx;
 288		frm = &head->frm;
 289		xdp->frame_cnt++;
 290
 291		act = bpf_prog_run_xdp(prog, ctx);
 292
 293		/* if program changed pkt bounds we need to update the xdp_frame */
 294		if (unlikely(ctx_was_changed(head))) {
 295			ret = xdp_update_frame_from_buff(ctx, frm);
 296			if (ret) {
 297				xdp_return_buff(ctx);
 298				continue;
 299			}
 300		}
 301
 302		switch (act) {
 303		case XDP_TX:
 304			/* we can't do a real XDP_TX since we're not in the
 305			 * driver, so turn it into a REDIRECT back to the same
 306			 * index
 307			 */
 308			ri->tgt_index = xdp->dev->ifindex;
 309			ri->map_id = INT_MAX;
 310			ri->map_type = BPF_MAP_TYPE_UNSPEC;
 311			fallthrough;
 312		case XDP_REDIRECT:
 313			redirect = true;
 314			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
 315			if (ret)
 316				xdp_return_buff(ctx);
 317			break;
 318		case XDP_PASS:
 319			frames[nframes++] = frm;
 320			break;
 321		default:
 322			bpf_warn_invalid_xdp_action(NULL, prog, act);
 323			fallthrough;
 324		case XDP_DROP:
 325			xdp_return_buff(ctx);
 326			break;
 327		}
 328	}
 329
 330out:
 331	if (redirect)
 332		xdp_do_flush();
 333	if (nframes) {
 334		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
 335		if (ret)
 336			err = ret;
 337	}
 338
 339	xdp_clear_return_frame_no_direct();
 340	local_bh_enable();
 341	return err;
 342}
 343
 344static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
 345				 u32 repeat, u32 batch_size, u32 *time)
 346
 347{
 348	struct xdp_test_data xdp = { .batch_size = batch_size };
 349	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 350	int ret;
 351
 352	if (!repeat)
 353		repeat = 1;
 354
 355	ret = xdp_test_run_setup(&xdp, ctx);
 356	if (ret)
 357		return ret;
 358
 359	bpf_test_timer_enter(&t);
 360	do {
 361		xdp.frame_cnt = 0;
 362		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
 363		if (unlikely(ret < 0))
 364			break;
 365	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
 366	bpf_test_timer_leave(&t);
 367
 368	xdp_test_run_teardown(&xdp);
 369	return ret;
 370}
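Compared with v5.14, the v6.2 XDP runner gained this "live frames" mode: batches of page_pool-backed frames are fed through the program and XDP_TX/XDP_REDIRECT/XDP_PASS verdicts are acted on for real, with batch_size capping the frames in flight per round. From userspace the mode is requested via a test flag together with test.batch_size; a hedged fragment (the flag name BPF_F_TEST_XDP_LIVE_FRAMES is my assumption, and 'attr' is as in the earlier sketches):

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = xdp_prog_fd;		/* hypothetical XDP program fd */
	attr.test.data_in = (unsigned long)frame;	/* template packet contents */
	attr.test.data_size_in = frame_len;
	attr.test.repeat = 1 << 20;			/* total number of frames to run */
	attr.test.batch_size = 64;			/* <= TEST_XDP_MAX_BATCH (256) */
	attr.test.flags = BPF_F_TEST_XDP_LIVE_FRAMES;	/* assumed name of the live-frames flag */

	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));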
 371
 372static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 373			u32 *retval, u32 *time, bool xdp)
 374{
 375	struct bpf_prog_array_item item = {.prog = prog};
 376	struct bpf_run_ctx *old_ctx;
 377	struct bpf_cg_run_ctx run_ctx;
 378	struct bpf_test_timer t = { NO_MIGRATE };
 379	enum bpf_cgroup_storage_type stype;
 380	int ret;
 381
 382	for_each_cgroup_storage_type(stype) {
 383		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
 384		if (IS_ERR(item.cgroup_storage[stype])) {
 385			item.cgroup_storage[stype] = NULL;
 386			for_each_cgroup_storage_type(stype)
 387				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
 388			return -ENOMEM;
 389		}
 390	}
 391
 392	if (!repeat)
 393		repeat = 1;
 394
 395	bpf_test_timer_enter(&t);
 396	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 397	do {
 398		run_ctx.prog_item = &item;
 399		if (xdp)
 400			*retval = bpf_prog_run_xdp(prog, ctx);
 401		else
 402			*retval = bpf_prog_run(prog, ctx);
 403	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
 404	bpf_reset_run_ctx(old_ctx);
 405	bpf_test_timer_leave(&t);
 406
 407	for_each_cgroup_storage_type(stype)
 408		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
 409
 410	return ret;
 411}
 412
 413static int bpf_test_finish(const union bpf_attr *kattr,
 414			   union bpf_attr __user *uattr, const void *data,
 415			   struct skb_shared_info *sinfo, u32 size,
 416			   u32 retval, u32 duration)
 417{
 418	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
 419	int err = -EFAULT;
 420	u32 copy_size = size;
 421
 422	/* Clamp copy if the user has provided a size hint, but copy the full
 423	 * buffer if not to retain old behaviour.
 424	 */
 425	if (kattr->test.data_size_out &&
 426	    copy_size > kattr->test.data_size_out) {
 427		copy_size = kattr->test.data_size_out;
 428		err = -ENOSPC;
 429	}
 430
 431	if (data_out) {
 432		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
 433
 434		if (len < 0) {
 435			err = -ENOSPC;
 436			goto out;
 437		}
 438
 439		if (copy_to_user(data_out, data, len))
 440			goto out;
 441
 442		if (sinfo) {
 443			int i, offset = len;
 444			u32 data_len;
 445
 446			for (i = 0; i < sinfo->nr_frags; i++) {
 447				skb_frag_t *frag = &sinfo->frags[i];
 448
 449				if (offset >= copy_size) {
 450					err = -ENOSPC;
 451					break;
 452				}
 453
 454				data_len = min_t(u32, copy_size - offset,
 455						 skb_frag_size(frag));
 456
 457				if (copy_to_user(data_out + offset,
 458						 skb_frag_address(frag),
 459						 data_len))
 460					goto out;
 461
 462				offset += data_len;
 463			}
 464		}
 465	}
 466
 467	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
 468		goto out;
 469	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
 470		goto out;
 471	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
 472		goto out;
 473	if (err != -ENOSPC)
 474		err = 0;
 475out:
 476	trace_bpf_test_finish(&err);
 477	return err;
 478}
 479
 480/* Integer types of various sizes and pointer combinations cover a variety of
 481 * architecture-dependent calling conventions. 7+ can be supported in the
 482 * future.
 483 */
 484__diag_push();
 485__diag_ignore_all("-Wmissing-prototypes",
 486		  "Global functions as their definitions will be in vmlinux BTF");
 487int noinline bpf_fentry_test1(int a)
 488{
 489	return a + 1;
 490}
 491EXPORT_SYMBOL_GPL(bpf_fentry_test1);
 492
 493int noinline bpf_fentry_test2(int a, u64 b)
 494{
 495	return a + b;
 496}
 497
 498int noinline bpf_fentry_test3(char a, int b, u64 c)
 499{
 500	return a + b + c;
 501}
 502
 503int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
 504{
 505	return (long)a + b + c + d;
 506}
 507
 508int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
 509{
 510	return a + (long)b + c + d + e;
 511}
 512
 513int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
 514{
 515	return a + (long)b + c + d + (long)e + f;
 516}
 517
 518struct bpf_fentry_test_t {
 519	struct bpf_fentry_test_t *a;
 520};
 521
 522int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
 523{
 524	return (long)arg;
 525}
 526
 527int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
 528{
 529	return (long)arg->a;
 530}
 531
 532int noinline bpf_modify_return_test(int a, int *b)
 533{
 534	*b += 1;
 535	return a + *b;
 536}
 537
 538u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
 539{
 540	return a + b + c + d;
 541}
 542
 543int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
 544{
 545	return a + b;
 546}
 547
 548struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
 549{
 550	return sk;
 551}
 552
 553struct prog_test_member1 {
 554	int a;
 555};
 556
 557struct prog_test_member {
 558	struct prog_test_member1 m;
 559	int c;
 560};
 561
 562struct prog_test_ref_kfunc {
 563	int a;
 564	int b;
 565	struct prog_test_member memb;
 566	struct prog_test_ref_kfunc *next;
 567	refcount_t cnt;
 568};
 569
 570static struct prog_test_ref_kfunc prog_test_struct = {
 571	.a = 42,
 572	.b = 108,
 573	.next = &prog_test_struct,
 574	.cnt = REFCOUNT_INIT(1),
 575};
 576
 577noinline struct prog_test_ref_kfunc *
 578bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
 579{
 580	refcount_inc(&prog_test_struct.cnt);
 581	return &prog_test_struct;
 582}
 583
 584noinline struct prog_test_member *
 585bpf_kfunc_call_memb_acquire(void)
 586{
 587	WARN_ON_ONCE(1);
 588	return NULL;
 589}
 590
 591noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
 592{
 593	if (!p)
 594		return;
 595
 596	refcount_dec(&p->cnt);
 597}
 598
 599noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
 600{
 601}
 602
 603noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
 604{
 605	WARN_ON_ONCE(1);
 606}
 607
 608static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
 609{
 610	if (size > 2 * sizeof(int))
 611		return NULL;
 612
 613	return (int *)p;
 614}
 615
 616noinline int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size)
 617{
 618	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
 619}
 620
 621noinline int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
 622{
 623	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
 624}
 625
 626/* the next 2 ones can't really be used for testing except to ensure
 627 * that the verifier rejects the call.
 628 * Acquire functions must return struct pointers, so these ones are
 629 * failing.
 630 */
 631noinline int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size)
 632{
 633	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
 634}
 635
 636noinline void bpf_kfunc_call_int_mem_release(int *p)
 637{
 638}
 639
 640noinline struct prog_test_ref_kfunc *
 641bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
 642{
 643	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
 644
 645	if (!p)
 646		return NULL;
 647	refcount_inc(&p->cnt);
 648	return p;
 649}
 650
 651struct prog_test_pass1 {
 652	int x0;
 653	struct {
 654		int x1;
 655		struct {
 656			int x2;
 657			struct {
 658				int x3;
 659			};
 660		};
 661	};
 662};
 663
 664struct prog_test_pass2 {
 665	int len;
 666	short arr1[4];
 667	struct {
 668		char arr2[4];
 669		unsigned long arr3[8];
 670	} x;
 671};
 672
 673struct prog_test_fail1 {
 674	void *p;
 675	int x;
 676};
 677
 678struct prog_test_fail2 {
 679	int x8;
 680	struct prog_test_pass1 x;
 681};
 682
 683struct prog_test_fail3 {
 684	int len;
 685	char arr1[2];
 686	char arr2[];
 687};
 688
 689noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
 690{
 691}
 692
 693noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
 694{
 695}
 696
 697noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
 698{
 699}
 700
 701noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
 702{
 703}
 704
 705noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
 706{
 707}
 708
 709noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
 710{
 711}
 712
 713noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
 714{
 715}
 716
 717noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
 718{
 719}
 720
 721noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
 722{
 723}
 724
 725noinline void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
 726{
 727}
 728
 729noinline void bpf_kfunc_call_test_destructive(void)
 730{
 731}
 732
 733__diag_pop();
 734
 735BTF_SET8_START(bpf_test_modify_return_ids)
 736BTF_ID_FLAGS(func, bpf_modify_return_test)
 737BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
 738BTF_SET8_END(bpf_test_modify_return_ids)
 739
 740static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
 741	.owner = THIS_MODULE,
 742	.set   = &bpf_test_modify_return_ids,
 743};
 744
 745BTF_SET8_START(test_sk_check_kfunc_ids)
 746BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
 747BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
 748BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
 749BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
 750BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
 751BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
 752BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
 753BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
 754BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
 755BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
 756BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
 757BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
 758BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
 759BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
 760BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
 761BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
 762BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
 763BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
 764BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
 765BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
 766BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
 767BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
 768BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
 769BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
 770BTF_SET8_END(test_sk_check_kfunc_ids)
 771
 772static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
 773			   u32 size, u32 headroom, u32 tailroom)
 774{
 775	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
 776	void *data;
 777
 778	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
 779		return ERR_PTR(-EINVAL);
 780
 781	if (user_size > size)
 782		return ERR_PTR(-EMSGSIZE);
 783
 784	size = SKB_DATA_ALIGN(size);
 785	data = kzalloc(size + headroom + tailroom, GFP_USER);
 786	if (!data)
 787		return ERR_PTR(-ENOMEM);
 788
 789	if (copy_from_user(data + headroom, data_in, user_size)) {
 790		kfree(data);
 791		return ERR_PTR(-EFAULT);
 792	}
 793
 794	return data;
 795}
 796
 797int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 798			      const union bpf_attr *kattr,
 799			      union bpf_attr __user *uattr)
 800{
 801	struct bpf_fentry_test_t arg = {};
 802	u16 side_effect = 0, ret = 0;
 803	int b = 2, err = -EFAULT;
 804	u32 retval = 0;
 805
 806	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 807		return -EINVAL;
 808
 809	switch (prog->expected_attach_type) {
 810	case BPF_TRACE_FENTRY:
 811	case BPF_TRACE_FEXIT:
 812		if (bpf_fentry_test1(1) != 2 ||
 813		    bpf_fentry_test2(2, 3) != 5 ||
 814		    bpf_fentry_test3(4, 5, 6) != 15 ||
 815		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
 816		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
 817		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
 818		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
 819		    bpf_fentry_test8(&arg) != 0)
 820			goto out;
 821		break;
 822	case BPF_MODIFY_RETURN:
 823		ret = bpf_modify_return_test(1, &b);
 824		if (b != 2)
 825			side_effect = 1;
 826		break;
 827	default:
 828		goto out;
 829	}
 830
 831	retval = ((u32)side_effect << 16) | ret;
 832	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
 833		goto out;
 834
 835	err = 0;
 836out:
 837	trace_bpf_test_finish(&err);
 838	return err;
 839}
 840
 841struct bpf_raw_tp_test_run_info {
 842	struct bpf_prog *prog;
 843	void *ctx;
 844	u32 retval;
 845};
 846
 847static void
 848__bpf_prog_test_run_raw_tp(void *data)
 849{
 850	struct bpf_raw_tp_test_run_info *info = data;
 851
 852	rcu_read_lock();
 853	info->retval = bpf_prog_run(info->prog, info->ctx);
 854	rcu_read_unlock();
 855}
 856
 857int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 858			     const union bpf_attr *kattr,
 859			     union bpf_attr __user *uattr)
 860{
 861	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
 862	__u32 ctx_size_in = kattr->test.ctx_size_in;
 863	struct bpf_raw_tp_test_run_info info;
 864	int cpu = kattr->test.cpu, err = 0;
 865	int current_cpu;
 866
 867	/* doesn't support data_in/out, ctx_out, duration, repeat or batch_size */
 868	if (kattr->test.data_in || kattr->test.data_out ||
 869	    kattr->test.ctx_out || kattr->test.duration ||
 870	    kattr->test.repeat || kattr->test.batch_size)
 871		return -EINVAL;
 872
 873	if (ctx_size_in < prog->aux->max_ctx_offset ||
 874	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
 875		return -EINVAL;
 876
 877	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
 878		return -EINVAL;
 879
 880	if (ctx_size_in) {
 881		info.ctx = memdup_user(ctx_in, ctx_size_in);
 882		if (IS_ERR(info.ctx))
 883			return PTR_ERR(info.ctx);
 884	} else {
 885		info.ctx = NULL;
 886	}
 887
 888	info.prog = prog;
 889
 890	current_cpu = get_cpu();
 891	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
 892	    cpu == current_cpu) {
 893		__bpf_prog_test_run_raw_tp(&info);
 894	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 895		/* smp_call_function_single() also checks cpu_online()
 896		 * after csd_lock(). However, since cpu is from user
 897		 * space, let's do an extra quick check to filter out
 898		 * invalid value before smp_call_function_single().
 899		 */
 900		err = -ENXIO;
 901	} else {
 902		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
 903					       &info, 1);
 904	}
 905	put_cpu();
 906
 907	if (!err &&
 908	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
 909		err = -EFAULT;
 910
 911	kfree(info.ctx);
 912	return err;
 913}
 914
 915static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
 916{
 917	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
 918	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
 919	u32 size = kattr->test.ctx_size_in;
 920	void *data;
 921	int err;
 922
 923	if (!data_in && !data_out)
 924		return NULL;
 925
 926	data = kzalloc(max_size, GFP_USER);
 927	if (!data)
 928		return ERR_PTR(-ENOMEM);
 929
 930	if (data_in) {
 931		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
 932		if (err) {
 933			kfree(data);
 934			return ERR_PTR(err);
 935		}
 936
 937		size = min_t(u32, max_size, size);
 938		if (copy_from_user(data, data_in, size)) {
 939			kfree(data);
 940			return ERR_PTR(-EFAULT);
 941		}
 942	}
 943	return data;
 944}
 945
 946static int bpf_ctx_finish(const union bpf_attr *kattr,
 947			  union bpf_attr __user *uattr, const void *data,
 948			  u32 size)
 949{
 950	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
 951	int err = -EFAULT;
 952	u32 copy_size = size;
 953
 954	if (!data || !data_out)
 955		return 0;
 956
 957	if (copy_size > kattr->test.ctx_size_out) {
 958		copy_size = kattr->test.ctx_size_out;
 959		err = -ENOSPC;
 960	}
 961
 962	if (copy_to_user(data_out, data, copy_size))
 963		goto out;
 964	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
 965		goto out;
 966	if (err != -ENOSPC)
 967		err = 0;
 968out:
 969	return err;
 970}
 971
 972/**
 973 * range_is_zero - test whether buffer is initialized
 974 * @buf: buffer to check
 975 * @from: check from this position
 976 * @to: check up until (excluding) this position
 977 *
 978 * This function returns true if there is no non-zero byte
 979 * in buf in the range [from, to).
 980 */
 981static inline bool range_is_zero(void *buf, size_t from, size_t to)
 982{
 983	return !memchr_inv((u8 *)buf + from, 0, to - from);
 984}
 985
 986static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 987{
 988	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
 989
 990	if (!__skb)
 991		return 0;
 992
 993	/* make sure the fields we don't use are zeroed */
 994	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
 995		return -EINVAL;
 996
 997	/* mark is allowed */
 998
 999	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
1000			   offsetof(struct __sk_buff, priority)))
1001		return -EINVAL;
1002
1003	/* priority is allowed */
1004	/* ingress_ifindex is allowed */
1005	/* ifindex is allowed */
1006
1007	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
1008			   offsetof(struct __sk_buff, cb)))
1009		return -EINVAL;
1010
1011	/* cb is allowed */
1012
1013	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
1014			   offsetof(struct __sk_buff, tstamp)))
1015		return -EINVAL;
1016
1017	/* tstamp is allowed */
1018	/* wire_len is allowed */
1019	/* gso_segs is allowed */
1020
1021	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
1022			   offsetof(struct __sk_buff, gso_size)))
1023		return -EINVAL;
1024
1025	/* gso_size is allowed */
1026
1027	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
1028			   offsetof(struct __sk_buff, hwtstamp)))
1029		return -EINVAL;
1030
1031	/* hwtstamp is allowed */
1032
1033	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
1034			   sizeof(struct __sk_buff)))
1035		return -EINVAL;
1036
1037	skb->mark = __skb->mark;
1038	skb->priority = __skb->priority;
1039	skb->skb_iif = __skb->ingress_ifindex;
1040	skb->tstamp = __skb->tstamp;
1041	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
1042
1043	if (__skb->wire_len == 0) {
1044		cb->pkt_len = skb->len;
1045	} else {
1046		if (__skb->wire_len < skb->len ||
1047		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
1048			return -EINVAL;
1049		cb->pkt_len = __skb->wire_len;
1050	}
1051
1052	if (__skb->gso_segs > GSO_MAX_SEGS)
1053		return -EINVAL;
1054	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
1055	skb_shinfo(skb)->gso_size = __skb->gso_size;
1056	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
1057
1058	return 0;
1059}
1060
1061static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
1062{
1063	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
1064
1065	if (!__skb)
1066		return;
1067
1068	__skb->mark = skb->mark;
1069	__skb->priority = skb->priority;
1070	__skb->ingress_ifindex = skb->skb_iif;
1071	__skb->ifindex = skb->dev->ifindex;
1072	__skb->tstamp = skb->tstamp;
1073	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
1074	__skb->wire_len = cb->pkt_len;
1075	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
1076	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
1077}
1078
1079static struct proto bpf_dummy_proto = {
1080	.name   = "bpf_dummy",
1081	.owner  = THIS_MODULE,
1082	.obj_size = sizeof(struct sock),
1083};
1084
1085int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1086			  union bpf_attr __user *uattr)
1087{
1088	bool is_l2 = false, is_direct_pkt_access = false;
1089	struct net *net = current->nsproxy->net_ns;
1090	struct net_device *dev = net->loopback_dev;
1091	u32 size = kattr->test.data_size_in;
1092	u32 repeat = kattr->test.repeat;
1093	struct __sk_buff *ctx = NULL;
1094	u32 retval, duration;
1095	int hh_len = ETH_HLEN;
1096	struct sk_buff *skb;
1097	struct sock *sk;
1098	void *data;
1099	int ret;
1100
1101	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1102		return -EINVAL;
1103
1104	data = bpf_test_init(kattr, kattr->test.data_size_in,
1105			     size, NET_SKB_PAD + NET_IP_ALIGN,
1106			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1107	if (IS_ERR(data))
1108		return PTR_ERR(data);
1109
1110	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
1111	if (IS_ERR(ctx)) {
1112		kfree(data);
1113		return PTR_ERR(ctx);
1114	}
1115
1116	switch (prog->type) {
1117	case BPF_PROG_TYPE_SCHED_CLS:
1118	case BPF_PROG_TYPE_SCHED_ACT:
1119		is_l2 = true;
1120		fallthrough;
1121	case BPF_PROG_TYPE_LWT_IN:
1122	case BPF_PROG_TYPE_LWT_OUT:
1123	case BPF_PROG_TYPE_LWT_XMIT:
1124		is_direct_pkt_access = true;
1125		break;
1126	default:
1127		break;
1128	}
1129
1130	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
1131	if (!sk) {
1132		kfree(data);
1133		kfree(ctx);
1134		return -ENOMEM;
1135	}
1136	sock_init_data(NULL, sk);
1137
1138	skb = slab_build_skb(data);
1139	if (!skb) {
1140		kfree(data);
1141		kfree(ctx);
1142		sk_free(sk);
1143		return -ENOMEM;
1144	}
1145	skb->sk = sk;
1146
1147	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1148	__skb_put(skb, size);
1149	if (ctx && ctx->ifindex > 1) {
1150		dev = dev_get_by_index(net, ctx->ifindex);
1151		if (!dev) {
1152			ret = -ENODEV;
1153			goto out;
1154		}
1155	}
1156	skb->protocol = eth_type_trans(skb, dev);
1157	skb_reset_network_header(skb);
1158
1159	switch (skb->protocol) {
1160	case htons(ETH_P_IP):
1161		sk->sk_family = AF_INET;
1162		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
1163			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
1164			sk->sk_daddr = ip_hdr(skb)->daddr;
1165		}
1166		break;
1167#if IS_ENABLED(CONFIG_IPV6)
1168	case htons(ETH_P_IPV6):
1169		sk->sk_family = AF_INET6;
1170		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
1171			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
1172			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
1173		}
1174		break;
1175#endif
1176	default:
1177		break;
1178	}
1179
1180	if (is_l2)
1181		__skb_push(skb, hh_len);
1182	if (is_direct_pkt_access)
1183		bpf_compute_data_pointers(skb);
1184	ret = convert___skb_to_skb(skb, ctx);
1185	if (ret)
1186		goto out;
1187	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
1188	if (ret)
1189		goto out;
1190	if (!is_l2) {
1191		if (skb_headroom(skb) < hh_len) {
1192			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
1193
1194			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
1195				ret = -ENOMEM;
1196				goto out;
1197			}
1198		}
1199		memset(__skb_push(skb, hh_len), 0, hh_len);
1200	}
1201	convert_skb_to___skb(skb, ctx);
1202
1203	size = skb->len;
 1204	/* a BPF program can never convert a linear skb to a non-linear one */
1205	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
1206		size = skb_headlen(skb);
1207	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
1208			      duration);
1209	if (!ret)
1210		ret = bpf_ctx_finish(kattr, uattr, ctx,
1211				     sizeof(struct __sk_buff));
1212out:
1213	if (dev && dev != net->loopback_dev)
1214		dev_put(dev);
1215	kfree_skb(skb);
1216	sk_free(sk);
1217	kfree(ctx);
1218	return ret;
1219}
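
A hedged userspace counterpart, driving this function through libbpf's bpf_prog_test_run_opts() for one of the L2 program types handled above; run_skb_test(), prog_fd, pkt and the buffer sizes are assumptions:

#include <stdio.h>
#include <bpf/bpf.h>

static int run_skb_test(int prog_fd, void *pkt, __u32 pkt_len)
{
	char data_out[1500];
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = data_out,
		.data_size_out = sizeof(data_out),
		.repeat = 100,	/* per-run time is averaged into opts.duration (ns) */
	);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	if (err)
		return err;
	printf("retval=%u duration=%uns data_size_out=%u\n",
	       opts.retval, opts.duration, opts.data_size_out);
	return 0;
}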
1220
1221static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
1222{
1223	unsigned int ingress_ifindex, rx_queue_index;
1224	struct netdev_rx_queue *rxqueue;
1225	struct net_device *device;
1226
1227	if (!xdp_md)
1228		return 0;
1229
1230	if (xdp_md->egress_ifindex != 0)
1231		return -EINVAL;
1232
1233	ingress_ifindex = xdp_md->ingress_ifindex;
1234	rx_queue_index = xdp_md->rx_queue_index;
1235
1236	if (!ingress_ifindex && rx_queue_index)
1237		return -EINVAL;
1238
1239	if (ingress_ifindex) {
1240		device = dev_get_by_index(current->nsproxy->net_ns,
1241					  ingress_ifindex);
1242		if (!device)
1243			return -ENODEV;
1244
1245		if (rx_queue_index >= device->real_num_rx_queues)
1246			goto free_dev;
1247
1248		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
1249
1250		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
1251			goto free_dev;
1252
1253		xdp->rxq = &rxqueue->xdp_rxq;
1254		/* The device is now tracked in the xdp->rxq for later
1255		 * dev_put()
1256		 */
1257	}
1258
1259	xdp->data = xdp->data_meta + xdp_md->data;
1260	return 0;
1261
1262free_dev:
1263	dev_put(device);
1264	return -EINVAL;
1265}
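
A hedged sketch of an xdp_md ctx_in that this conversion (together with the checks in bpf_prog_test_run_xdp() below) accepts; the interface name is an assumption, and its rx queue 0 must have a registered xdp_rxq_info:

#include <string.h>
#include <net/if.h>
#include <linux/bpf.h>

static void fill_xdp_ctx(struct xdp_md *ctx, __u32 data_size_in)
{
	memset(ctx, 0, sizeof(*ctx));	/* egress_ifindex must stay 0 */
	ctx->data = 0;			/* metadata length; checked by xdp_metalen_invalid() */
	ctx->data_end = data_size_in;	/* must equal data_size_in */
	ctx->ingress_ifindex = if_nametoindex("eth0");	/* assumed NIC */
	ctx->rx_queue_index = 0;	/* must be < real_num_rx_queues */
}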
1266
1267static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
1268{
1269	if (!xdp_md)
1270		return;
1271
1272	xdp_md->data = xdp->data - xdp->data_meta;
1273	xdp_md->data_end = xdp->data_end - xdp->data_meta;
1274
1275	if (xdp_md->ingress_ifindex)
1276		dev_put(xdp->rxq->dev);
1277}
1278
1279int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1280			  union bpf_attr __user *uattr)
1281{
1282	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
1283	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1284	u32 batch_size = kattr->test.batch_size;
1285	u32 retval = 0, duration, max_data_sz;
1286	u32 size = kattr->test.data_size_in;
1287	u32 headroom = XDP_PACKET_HEADROOM;
1288	u32 repeat = kattr->test.repeat;
1289	struct netdev_rx_queue *rxqueue;
1290	struct skb_shared_info *sinfo;
1291	struct xdp_buff xdp = {};
1292	int i, ret = -EINVAL;
1293	struct xdp_md *ctx;
1294	void *data;
1295
1296	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
1297	    prog->expected_attach_type == BPF_XDP_CPUMAP)
1298		return -EINVAL;
1299
1300	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
1301		return -EINVAL;
1302
1303	if (do_live) {
1304		if (!batch_size)
1305			batch_size = NAPI_POLL_WEIGHT;
1306		else if (batch_size > TEST_XDP_MAX_BATCH)
1307			return -E2BIG;
1308
1309		headroom += sizeof(struct xdp_page_head);
1310	} else if (batch_size) {
1311		return -EINVAL;
1312	}
1313
1314	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
1315	if (IS_ERR(ctx))
1316		return PTR_ERR(ctx);
1317
1318	if (ctx) {
 1319		/* There can't be user-provided data before the metadata */
1320		if (ctx->data_meta || ctx->data_end != size ||
1321		    ctx->data > ctx->data_end ||
1322		    unlikely(xdp_metalen_invalid(ctx->data)) ||
1323		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
1324			goto free_ctx;
 1325		/* Metadata is allocated from the headroom */
1326		headroom -= ctx->data;
1327	}
1328
1329	max_data_sz = 4096 - headroom - tailroom;
1330	if (size > max_data_sz) {
1331		/* disallow live data mode for jumbo frames */
1332		if (do_live)
1333			goto free_ctx;
1334		size = max_data_sz;
1335	}
1336
1337	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
1338	if (IS_ERR(data)) {
1339		ret = PTR_ERR(data);
1340		goto free_ctx;
1341	}
1342
1343	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
1344	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
1345	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
1346	xdp_prepare_buff(&xdp, data, headroom, size, true);
1347	sinfo = xdp_get_shared_info_from_buff(&xdp);
1348
1349	ret = xdp_convert_md_to_buff(ctx, &xdp);
1350	if (ret)
1351		goto free_data;
1352
1353	if (unlikely(kattr->test.data_size_in > size)) {
1354		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
1355
1356		while (size < kattr->test.data_size_in) {
1357			struct page *page;
1358			skb_frag_t *frag;
1359			u32 data_len;
1360
1361			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
1362				ret = -ENOMEM;
1363				goto out;
1364			}
1365
1366			page = alloc_page(GFP_KERNEL);
1367			if (!page) {
1368				ret = -ENOMEM;
1369				goto out;
1370			}
1371
1372			frag = &sinfo->frags[sinfo->nr_frags++];
1373			__skb_frag_set_page(frag, page);
1374
1375			data_len = min_t(u32, kattr->test.data_size_in - size,
1376					 PAGE_SIZE);
1377			skb_frag_size_set(frag, data_len);
1378
1379			if (copy_from_user(page_address(page), data_in + size,
1380					   data_len)) {
1381				ret = -EFAULT;
1382				goto out;
1383			}
1384			sinfo->xdp_frags_size += data_len;
1385			size += data_len;
1386		}
1387		xdp_buff_set_frags_flag(&xdp);
1388	}
1389
1390	if (repeat > 1)
1391		bpf_prog_change_xdp(NULL, prog);
1392
1393	if (do_live)
1394		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
1395	else
1396		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
1397	/* We convert the xdp_buff back to an xdp_md before checking the return
1398	 * code so the reference count of any held netdevice will be decremented
1399	 * even if the test run failed.
1400	 */
1401	xdp_convert_buff_to_md(&xdp, ctx);
1402	if (ret)
1403		goto out;
1404
1405	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
1406	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
1407			      retval, duration);
1408	if (!ret)
1409		ret = bpf_ctx_finish(kattr, uattr, ctx,
1410				     sizeof(struct xdp_md));
1411
1412out:
1413	if (repeat > 1)
1414		bpf_prog_change_xdp(prog, NULL);
1415free_data:
1416	for (i = 0; i < sinfo->nr_frags; i++)
1417		__free_page(skb_frag_page(&sinfo->frags[i]));
1418	kfree(data);
1419free_ctx:
1420	kfree(ctx);
1421	return ret;
1422}
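
A hedged userspace sketch of the live-frames mode gated above by BPF_F_TEST_XDP_LIVE_FRAMES: the frames are actually transmitted or redirected, so data_out/ctx_out must stay unset, and a non-zero batch_size is only accepted together with the flag (0 falls back to NAPI_POLL_WEIGHT). run_xdp_live(), xdp_prog_fd and the frame buffer are assumptions:

#include <bpf/bpf.h>

static int run_xdp_live(int xdp_prog_fd, void *frame, __u32 frame_len)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = frame,
		.data_size_in = frame_len,
		.repeat = 1 << 20,			/* number of frames to run */
		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
		.batch_size = 64,			/* must stay <= TEST_XDP_MAX_BATCH */
	);

	return bpf_prog_test_run_opts(xdp_prog_fd, &opts);
}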
1423
1424static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
1425{
1426	/* make sure the fields we don't use are zeroed */
1427	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
1428		return -EINVAL;
1429
1430	/* flags is allowed */
1431
1432	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
1433			   sizeof(struct bpf_flow_keys)))
1434		return -EINVAL;
1435
1436	return 0;
1437}
1438
1439int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1440				     const union bpf_attr *kattr,
1441				     union bpf_attr __user *uattr)
1442{
1443	struct bpf_test_timer t = { NO_PREEMPT };
1444	u32 size = kattr->test.data_size_in;
1445	struct bpf_flow_dissector ctx = {};
1446	u32 repeat = kattr->test.repeat;
1447	struct bpf_flow_keys *user_ctx;
1448	struct bpf_flow_keys flow_keys;
1449	const struct ethhdr *eth;
1450	unsigned int flags = 0;
1451	u32 retval, duration;
1452	void *data;
1453	int ret;
1454
1455	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1456		return -EINVAL;
1457
1458	if (size < ETH_HLEN)
1459		return -EINVAL;
1460
1461	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
1462	if (IS_ERR(data))
1463		return PTR_ERR(data);
1464
1465	eth = (struct ethhdr *)data;
1466
1467	if (!repeat)
1468		repeat = 1;
1469
1470	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
1471	if (IS_ERR(user_ctx)) {
1472		kfree(data);
1473		return PTR_ERR(user_ctx);
1474	}
1475	if (user_ctx) {
1476		ret = verify_user_bpf_flow_keys(user_ctx);
1477		if (ret)
1478			goto out;
1479		flags = user_ctx->flags;
1480	}
1481
1482	ctx.flow_keys = &flow_keys;
1483	ctx.data = data;
1484	ctx.data_end = (__u8 *)data + size;
1485
1486	bpf_test_timer_enter(&t);
1487	do {
1488		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
1489					  size, flags);
1490	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1491	bpf_test_timer_leave(&t);
1492
1493	if (ret < 0)
1494		goto out;
1495
1496	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
1497			      sizeof(flow_keys), retval, duration);
1498	if (!ret)
1499		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
1500				     sizeof(struct bpf_flow_keys));
1501
1502out:
1503	kfree(user_ctx);
1504	kfree(data);
1505	return ret;
1506}
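
A hedged userspace sketch for the flow-dissector path: the dissected keys come back through data_out as a struct bpf_flow_keys, and an optional ctx_in of the same type only carries the flags field. dissect_packet() and flow_prog_fd are assumptions:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int dissect_packet(int flow_prog_fd, void *pkt, __u32 pkt_len,
			  struct bpf_flow_keys *keys)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,			/* must start with an Ethernet header */
		.data_size_in = pkt_len,	/* >= ETH_HLEN */
		.data_out = keys,
		.data_size_out = sizeof(*keys),
	);
	int err = bpf_prog_test_run_opts(flow_prog_fd, &opts);

	if (err)
		return err;
	return (int)opts.retval;	/* dissector verdict, e.g. BPF_OK or BPF_DROP */
}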
1507
1508int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
1509				union bpf_attr __user *uattr)
1510{
1511	struct bpf_test_timer t = { NO_PREEMPT };
1512	struct bpf_prog_array *progs = NULL;
1513	struct bpf_sk_lookup_kern ctx = {};
1514	u32 repeat = kattr->test.repeat;
1515	struct bpf_sk_lookup *user_ctx;
1516	u32 retval, duration;
1517	int ret = -EINVAL;
1518
1519	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
1520		return -EINVAL;
1521
1522	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
1523	    kattr->test.data_size_out)
1524		return -EINVAL;
1525
1526	if (!repeat)
1527		repeat = 1;
1528
1529	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
1530	if (IS_ERR(user_ctx))
1531		return PTR_ERR(user_ctx);
1532
1533	if (!user_ctx)
1534		return -EINVAL;
1535
1536	if (user_ctx->sk)
1537		goto out;
1538
1539	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
1540		goto out;
1541
1542	if (user_ctx->local_port > U16_MAX) {
1543		ret = -ERANGE;
1544		goto out;
1545	}
1546
1547	ctx.family = (u16)user_ctx->family;
1548	ctx.protocol = (u16)user_ctx->protocol;
1549	ctx.dport = (u16)user_ctx->local_port;
1550	ctx.sport = user_ctx->remote_port;
1551
1552	switch (ctx.family) {
1553	case AF_INET:
1554		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
1555		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
1556		break;
1557
1558#if IS_ENABLED(CONFIG_IPV6)
1559	case AF_INET6:
1560		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
1561		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
1562		break;
1563#endif
1564
1565	default:
1566		ret = -EAFNOSUPPORT;
1567		goto out;
1568	}
1569
1570	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
1571	if (!progs) {
1572		ret = -ENOMEM;
1573		goto out;
1574	}
1575
1576	progs->items[0].prog = prog;
1577
1578	bpf_test_timer_enter(&t);
1579	do {
1580		ctx.selected_sk = NULL;
1581		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
1582	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
1583	bpf_test_timer_leave(&t);
1584
1585	if (ret < 0)
1586		goto out;
1587
1588	user_ctx->cookie = 0;
1589	if (ctx.selected_sk) {
1590		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
1591			ret = -EOPNOTSUPP;
1592			goto out;
1593		}
1594
1595		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
1596	}
1597
1598	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
1599	if (!ret)
1600		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
1601
1602out:
1603	bpf_prog_array_free(progs);
1604	kfree(user_ctx);
1605	return ret;
1606}
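
A hedged userspace sketch for this path: only ctx_in/ctx_out of type struct bpf_sk_lookup are accepted (no packet data), every byte past local_port must stay zero, and the cookie of whatever socket the program selected is written back into the same struct. The addresses, ports and the lookup_test()/sk_lookup_prog_fd names are assumptions:

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

static int lookup_test(int sk_lookup_prog_fd, __u64 *cookie)
{
	struct bpf_sk_lookup ctx;
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
		.ctx_out = &ctx,
		.ctx_size_out = sizeof(ctx),
	);
	int err;

	memset(&ctx, 0, sizeof(ctx));
	ctx.family = AF_INET;
	ctx.protocol = IPPROTO_TCP;
	ctx.local_ip4 = htonl(INADDR_LOOPBACK);		/* network byte order */
	ctx.remote_ip4 = htonl(INADDR_LOOPBACK);
	ctx.local_port = 8080;				/* host byte order, <= U16_MAX */
	ctx.remote_port = htons(40000);			/* network byte order */

	err = bpf_prog_test_run_opts(sk_lookup_prog_fd, &opts);
	if (!err)
		*cookie = ctx.cookie;
	return err;
}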
1607
1608int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1609			      const union bpf_attr *kattr,
1610			      union bpf_attr __user *uattr)
1611{
1612	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
1613	__u32 ctx_size_in = kattr->test.ctx_size_in;
1614	void *ctx = NULL;
1615	u32 retval;
1616	int err = 0;
1617
 1618	/* doesn't support data_in/out, ctx_out, duration, repeat, flags or batch_size */
1619	if (kattr->test.data_in || kattr->test.data_out ||
1620	    kattr->test.ctx_out || kattr->test.duration ||
1621	    kattr->test.repeat || kattr->test.flags ||
1622	    kattr->test.batch_size)
1623		return -EINVAL;
1624
1625	if (ctx_size_in < prog->aux->max_ctx_offset ||
1626	    ctx_size_in > U16_MAX)
1627		return -EINVAL;
1628
1629	if (ctx_size_in) {
1630		ctx = memdup_user(ctx_in, ctx_size_in);
1631		if (IS_ERR(ctx))
1632			return PTR_ERR(ctx);
1633	}
1634
1635	rcu_read_lock_trace();
1636	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
1637	rcu_read_unlock_trace();
1638
1639	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
1640		err = -EFAULT;
1641		goto out;
1642	}
1643	if (ctx_size_in)
1644		if (copy_to_user(ctx_in, ctx, ctx_size_in))
1645			err = -EFAULT;
1646out:
1647	kfree(ctx);
1648	return err;
1649}
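
A hedged userspace sketch for syscall programs: a BPF_PROG_TYPE_SYSCALL program takes an arbitrary caller-defined context, which is copied in through ctx_in and written back to the same user buffer after the run; ctx_size_in must cover every context offset the program actually reads. struct syscall_args and run_syscall_prog() are assumptions:

#include <bpf/bpf.h>

struct syscall_args {
	__u32 map_fd;		/* consumed by the (assumed) syscall program */
	__u32 out_status;	/* filled in by the program */
};

static int run_syscall_prog(int syscall_prog_fd, struct syscall_args *args)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(*args),
	);
	int err = bpf_prog_test_run_opts(syscall_prog_fd, &opts);

	if (err)
		return err;
	return (int)opts.retval;	/* the program's return value */
}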
1650
1651static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
1652	.owner = THIS_MODULE,
1653	.set   = &test_sk_check_kfunc_ids,
1654};
1655
1656BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
1657BTF_ID(struct, prog_test_ref_kfunc)
1658BTF_ID(func, bpf_kfunc_call_test_release)
1659BTF_ID(struct, prog_test_member)
1660BTF_ID(func, bpf_kfunc_call_memb_release)
1661
1662static int __init bpf_prog_test_run_init(void)
1663{
1664	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
1665		{
1666		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
1667		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
1668		},
1669		{
1670		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
1671		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
1672		},
1673	};
1674	int ret;
1675
1676	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
1677	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
1678	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
1679	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
1680	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
1681						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
1682						  THIS_MODULE);
1683}
1684late_initcall(bpf_prog_test_run_init);