net/sched/cls_bpf.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Berkeley Packet Filter based traffic classifier
  4 *
  5 * Might be used to classify traffic through flexible, user-defined and
  6 * possibly JIT-ed BPF filters for traffic control as an alternative to
  7 * ematches.
  8 *
  9 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 10 */
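
For context, a minimal sketch of the kind of program this classifier executes: an eBPF program of type BPF_PROG_TYPE_SCHED_CLS, compiled for the BPF target. The sketch assumes libbpf's SEC() annotation convention; the function and object names are illustrative. In direct-action mode (TCA_BPF_FLAG_ACT_DIRECT) the return value is a TC_ACT_* opcode; otherwise it is interpreted as a classid, with 0 meaning "no match" and -1 meaning "use the filter's configured classid".

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	SEC("classifier")
	int cls_example(struct __sk_buff *skb)
	{
		/* Direct-action mode: return a TC_ACT_* verdict directly. */
		return TC_ACT_OK;
	}

	char _license[] SEC("license") = "GPL";

Such an object could then be attached with something like: tc filter add dev eth0 ingress bpf da obj cls_example.o
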
 11
 12#include <linux/module.h>
 13#include <linux/types.h>
 14#include <linux/skbuff.h>
 15#include <linux/filter.h>
 16#include <linux/bpf.h>
 17#include <linux/idr.h>
 18
 19#include <net/rtnetlink.h>
 20#include <net/pkt_cls.h>
 21#include <net/sock.h>
 22#include <net/tc_wrapper.h>
 23
 24MODULE_LICENSE("GPL");
 25MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
 26MODULE_DESCRIPTION("TC BPF based classifier");
 27
 28#define CLS_BPF_NAME_LEN	256
 29#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
 30	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
 31
 32struct cls_bpf_head {
 33	struct list_head plist;
 34	struct idr handle_idr;
 35	struct rcu_head rcu;
 36};
 37
 38struct cls_bpf_prog {
 39	struct bpf_prog *filter;
 40	struct list_head link;
 41	struct tcf_result res;
 42	bool exts_integrated;
 43	u32 gen_flags;
 44	unsigned int in_hw_count;
 45	struct tcf_exts exts;
 46	u32 handle;
 47	u16 bpf_num_ops;
 48	struct sock_filter *bpf_ops;
 49	const char *bpf_name;
 50	struct tcf_proto *tp;
 51	struct rcu_work rwork;
 52};
 53
 54static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
 55	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
 56	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
 57	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
 58	[TCA_BPF_FD]		= { .type = NLA_U32 },
 59	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
 60				    .len = CLS_BPF_NAME_LEN },
 61	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
 62	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
 63				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
 64};
 65
 66static int cls_bpf_exec_opcode(int code)
 67{
 68	switch (code) {
 69	case TC_ACT_OK:
 70	case TC_ACT_SHOT:
 71	case TC_ACT_STOLEN:
 72	case TC_ACT_TRAP:
 73	case TC_ACT_REDIRECT:
 74	case TC_ACT_UNSPEC:
 75		return code;
 76	default:
 77		return TC_ACT_UNSPEC;
 78	}
 79}
 80
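/* Return-value convention for the BPF runs below: in direct-action mode
 * (prog->exts_integrated) the program returns a TC_ACT_* opcode, which
 * cls_bpf_exec_opcode() above sanitizes; otherwise 0 means "no match,
 * try the next prog", -1 means "match, use the configured prog->res",
 * and any other value is taken as the classid itself.
 */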
 81TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
 82				       const struct tcf_proto *tp,
 83				       struct tcf_result *res)
 84{
 85	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
 86	bool at_ingress = skb_at_tc_ingress(skb);
 87	struct cls_bpf_prog *prog;
 88	int ret = -1;
 89
 90	list_for_each_entry_rcu(prog, &head->plist, link) {
 91		int filter_res;
 92
 93		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
 94
 95		if (tc_skip_sw(prog->gen_flags)) {
 96			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
 97		} else if (at_ingress) {
 98			/* It is safe to push/pull even if skb_shared() */
 99			__skb_push(skb, skb->mac_len);
100			bpf_compute_data_pointers(skb);
101			filter_res = bpf_prog_run(prog->filter, skb);
102			__skb_pull(skb, skb->mac_len);
103		} else {
104			bpf_compute_data_pointers(skb);
105			filter_res = bpf_prog_run(prog->filter, skb);
106		}
107		if (unlikely(!skb->tstamp && skb->tstamp_type))
108			skb->tstamp_type = SKB_CLOCK_REALTIME;
109
110		if (prog->exts_integrated) {
111			res->class   = 0;
112			res->classid = TC_H_MAJ(prog->res.classid) |
113				       qdisc_skb_cb(skb)->tc_classid;
114
115			ret = cls_bpf_exec_opcode(filter_res);
116			if (ret == TC_ACT_UNSPEC)
117				continue;
118			break;
119		}
120
121		if (filter_res == 0)
122			continue;
123		if (filter_res != -1) {
124			res->class   = 0;
125			res->classid = filter_res;
126		} else {
127			*res = prog->res;
128		}
129
130		ret = tcf_exts_exec(skb, &prog->exts, res);
131		if (ret < 0)
132			continue;
133
134		break;
135	}
136
137	return ret;
138}
139
140static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
141{
142	return !prog->bpf_ops;
143}
144
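/* Issue the hardware offload callback for this filter: prog && oldprog
 * requests a replace, prog alone an add, oldprog alone a destroy. If an
 * add/replace fails, the call is replayed with the arguments swapped to
 * roll the hardware back to the previous program.
 */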
145static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
146			       struct cls_bpf_prog *oldprog,
147			       struct netlink_ext_ack *extack)
148{
149	struct tcf_block *block = tp->chain->block;
150	struct tc_cls_bpf_offload cls_bpf = {};
151	struct cls_bpf_prog *obj;
152	bool skip_sw;
153	int err;
154
155	skip_sw = prog && tc_skip_sw(prog->gen_flags);
156	obj = prog ?: oldprog;
157
158	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
159	cls_bpf.command = TC_CLSBPF_OFFLOAD;
160	cls_bpf.exts = &obj->exts;
161	cls_bpf.prog = prog ? prog->filter : NULL;
162	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
163	cls_bpf.name = obj->bpf_name;
164	cls_bpf.exts_integrated = obj->exts_integrated;
165
166	if (oldprog && prog)
167		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
168					  skip_sw, &oldprog->gen_flags,
169					  &oldprog->in_hw_count,
170					  &prog->gen_flags, &prog->in_hw_count,
171					  true);
172	else if (prog)
173		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
174				      skip_sw, &prog->gen_flags,
175				      &prog->in_hw_count, true);
176	else
177		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
178					  skip_sw, &oldprog->gen_flags,
179					  &oldprog->in_hw_count, true);
180
181	if (prog && err) {
182		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
183		return err;
184	}
185
186	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
187		return -EINVAL;
188
189	return 0;
190}
191
192static u32 cls_bpf_flags(u32 flags)
193{
194	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
195}
196
197static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
198			   struct cls_bpf_prog *oldprog,
199			   struct netlink_ext_ack *extack)
200{
201	if (prog && oldprog &&
202	    cls_bpf_flags(prog->gen_flags) !=
203	    cls_bpf_flags(oldprog->gen_flags))
204		return -EINVAL;
205
206	if (prog && tc_skip_hw(prog->gen_flags))
207		prog = NULL;
208	if (oldprog && tc_skip_hw(oldprog->gen_flags))
209		oldprog = NULL;
210	if (!prog && !oldprog)
211		return 0;
212
213	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
214}
215
216static void cls_bpf_stop_offload(struct tcf_proto *tp,
217				 struct cls_bpf_prog *prog,
218				 struct netlink_ext_ack *extack)
219{
220	int err;
221
222	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
223	if (err)
224		pr_err("Stopping hardware offload failed: %d\n", err);
225}
226
227static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
228					 struct cls_bpf_prog *prog)
229{
230	struct tcf_block *block = tp->chain->block;
231	struct tc_cls_bpf_offload cls_bpf = {};
232
233	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
234	cls_bpf.command = TC_CLSBPF_STATS;
235	cls_bpf.exts = &prog->exts;
236	cls_bpf.prog = prog->filter;
237	cls_bpf.name = prog->bpf_name;
238	cls_bpf.exts_integrated = prog->exts_integrated;
239
240	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
241}
242
243static int cls_bpf_init(struct tcf_proto *tp)
244{
245	struct cls_bpf_head *head;
246
247	head = kzalloc(sizeof(*head), GFP_KERNEL);
248	if (head == NULL)
249		return -ENOBUFS;
250
251	INIT_LIST_HEAD_RCU(&head->plist);
252	idr_init(&head->handle_idr);
253	rcu_assign_pointer(tp->root, head);
254
255	return 0;
256}
257
258static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
259{
260	if (cls_bpf_is_ebpf(prog))
261		bpf_prog_put(prog->filter);
262	else
263		bpf_prog_destroy(prog->filter);
264
265	kfree(prog->bpf_name);
266	kfree(prog->bpf_ops);
267}
268
269static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
270{
271	tcf_exts_destroy(&prog->exts);
272	tcf_exts_put_net(&prog->exts);
273
274	cls_bpf_free_parms(prog);
275	kfree(prog);
276}
277
278static void cls_bpf_delete_prog_work(struct work_struct *work)
279{
280	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
281						 struct cls_bpf_prog,
282						 rwork);
283	rtnl_lock();
284	__cls_bpf_delete_prog(prog);
285	rtnl_unlock();
286}
287
288static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
289			     struct netlink_ext_ack *extack)
290{
291	struct cls_bpf_head *head = rtnl_dereference(tp->root);
292
293	idr_remove(&head->handle_idr, prog->handle);
294	cls_bpf_stop_offload(tp, prog, extack);
295	list_del_rcu(&prog->link);
296	tcf_unbind_filter(tp, &prog->res);
297	if (tcf_exts_get_net(&prog->exts))
298		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
299	else
300		__cls_bpf_delete_prog(prog);
301}
302
303static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
304			  bool rtnl_held, struct netlink_ext_ack *extack)
305{
306	struct cls_bpf_head *head = rtnl_dereference(tp->root);
307
308	__cls_bpf_delete(tp, arg, extack);
309	*last = list_empty(&head->plist);
310	return 0;
311}
312
313static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
314			    struct netlink_ext_ack *extack)
315{
316	struct cls_bpf_head *head = rtnl_dereference(tp->root);
317	struct cls_bpf_prog *prog, *tmp;
318
319	list_for_each_entry_safe(prog, tmp, &head->plist, link)
320		__cls_bpf_delete(tp, prog, extack);
321
322	idr_destroy(&head->handle_idr);
323	kfree_rcu(head, rcu);
324}
325
326static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
327{
328	struct cls_bpf_head *head = rtnl_dereference(tp->root);
329	struct cls_bpf_prog *prog;
330
331	list_for_each_entry(prog, &head->plist, link) {
332		if (prog->handle == handle)
333			return prog;
334	}
335
336	return NULL;
337}
338
339static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
340{
341	struct sock_filter *bpf_ops;
342	struct sock_fprog_kern fprog_tmp;
343	struct bpf_prog *fp;
344	u16 bpf_size, bpf_num_ops;
345	int ret;
346
347	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
348	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
349		return -EINVAL;
350
351	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
352	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
353		return -EINVAL;
354
355	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
356	if (bpf_ops == NULL)
357		return -ENOMEM;
358
359	fprog_tmp.len = bpf_num_ops;
360	fprog_tmp.filter = bpf_ops;
361
362	ret = bpf_prog_create(&fp, &fprog_tmp);
363	if (ret < 0) {
364		kfree(bpf_ops);
365		return ret;
366	}
367
368	prog->bpf_ops = bpf_ops;
369	prog->bpf_num_ops = bpf_num_ops;
370	prog->bpf_name = NULL;
371	prog->filter = fp;
372
373	return 0;
374}
375
376static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
377				 u32 gen_flags, const struct tcf_proto *tp)
378{
379	struct bpf_prog *fp;
380	char *name = NULL;
381	bool skip_sw;
382	u32 bpf_fd;
383
384	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
385	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
386
387	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
388	if (IS_ERR(fp))
389		return PTR_ERR(fp);
390
391	if (tb[TCA_BPF_NAME]) {
392		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
393		if (!name) {
394			bpf_prog_put(fp);
395			return -ENOMEM;
396		}
397	}
398
399	prog->bpf_ops = NULL;
400	prog->bpf_name = name;
401	prog->filter = fp;
402
403	if (fp->dst_needed)
404		tcf_block_netif_keep_dst(tp->chain->block);
405
406	return 0;
407}
408
409static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
410			  struct tcf_proto *tp, unsigned long base,
411			  u32 handle, struct nlattr **tca,
412			  void **arg, u32 flags,
413			  struct netlink_ext_ack *extack)
414{
415	struct cls_bpf_head *head = rtnl_dereference(tp->root);
416	bool is_bpf, is_ebpf, have_exts = false;
417	struct cls_bpf_prog *oldprog = *arg;
418	struct nlattr *tb[TCA_BPF_MAX + 1];
419	bool bound_to_filter = false;
420	struct cls_bpf_prog *prog;
421	u32 gen_flags = 0;
422	int ret;
423
424	if (tca[TCA_OPTIONS] == NULL)
425		return -EINVAL;
426
427	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
428					  bpf_policy, NULL);
429	if (ret < 0)
430		return ret;
431
432	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
433	if (!prog)
434		return -ENOBUFS;
435
436	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
437	if (ret < 0)
438		goto errout;
439
440	if (oldprog) {
441		if (handle && oldprog->handle != handle) {
442			ret = -EINVAL;
443			goto errout;
444		}
445	}
446
447	if (handle == 0) {
448		handle = 1;
449		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
450				    INT_MAX, GFP_KERNEL);
451	} else if (!oldprog) {
452		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
453				    handle, GFP_KERNEL);
454	}
455
456	if (ret)
457		goto errout;
458	prog->handle = handle;
459
460	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
461	is_ebpf = tb[TCA_BPF_FD];
462	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
463		ret = -EINVAL;
464		goto errout_idr;
465	}
466
467	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
468				flags, extack);
469	if (ret < 0)
470		goto errout_idr;
471
472	if (tb[TCA_BPF_FLAGS]) {
473		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
474
475		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
476			ret = -EINVAL;
477			goto errout_idr;
478		}
479
480		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
481	}
482	if (tb[TCA_BPF_FLAGS_GEN]) {
483		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
484		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
485		    !tc_flags_valid(gen_flags)) {
486			ret = -EINVAL;
487			goto errout_idr;
488		}
489	}
490
491	prog->exts_integrated = have_exts;
492	prog->gen_flags = gen_flags;
493
494	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
495		cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
496	if (ret < 0)
497		goto errout_idr;
498
499	if (tb[TCA_BPF_CLASSID]) {
500		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
501		tcf_bind_filter(tp, &prog->res, base);
502		bound_to_filter = true;
503	}
504
505	ret = cls_bpf_offload(tp, prog, oldprog, extack);
506	if (ret)
507		goto errout_parms;
508
509	if (!tc_in_hw(prog->gen_flags))
510		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
511
512	tcf_proto_update_usesw(tp, prog->gen_flags);
513
514	if (oldprog) {
515		idr_replace(&head->handle_idr, prog, handle);
516		list_replace_rcu(&oldprog->link, &prog->link);
517		tcf_unbind_filter(tp, &oldprog->res);
518		tcf_exts_get_net(&oldprog->exts);
519		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
520	} else {
521		list_add_rcu(&prog->link, &head->plist);
522	}
523
524	*arg = prog;
525	return 0;
526
527errout_parms:
528	if (bound_to_filter)
529		tcf_unbind_filter(tp, &prog->res);
530	cls_bpf_free_parms(prog);
531errout_idr:
532	if (!oldprog)
533		idr_remove(&head->handle_idr, prog->handle);
534errout:
535	tcf_exts_destroy(&prog->exts);
536	kfree(prog);
537	return ret;
538}
539
540static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
541				 struct sk_buff *skb)
542{
543	struct nlattr *nla;
544
545	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
546		return -EMSGSIZE;
547
548	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
549			  sizeof(struct sock_filter));
550	if (nla == NULL)
551		return -EMSGSIZE;
552
553	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
554
555	return 0;
556}
557
558static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
559				  struct sk_buff *skb)
560{
561	struct nlattr *nla;
562
563	if (prog->bpf_name &&
564	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
565		return -EMSGSIZE;
566
567	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
568		return -EMSGSIZE;
569
570	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
571	if (nla == NULL)
572		return -EMSGSIZE;
573
574	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
575
576	return 0;
577}
578
579static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
580			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
581{
582	struct cls_bpf_prog *prog = fh;
583	struct nlattr *nest;
584	u32 bpf_flags = 0;
585	int ret;
586
587	if (prog == NULL)
588		return skb->len;
589
590	tm->tcm_handle = prog->handle;
591
592	cls_bpf_offload_update_stats(tp, prog);
593
594	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
595	if (nest == NULL)
596		goto nla_put_failure;
597
598	if (prog->res.classid &&
599	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
600		goto nla_put_failure;
601
602	if (cls_bpf_is_ebpf(prog))
603		ret = cls_bpf_dump_ebpf_info(prog, skb);
604	else
605		ret = cls_bpf_dump_bpf_info(prog, skb);
606	if (ret)
607		goto nla_put_failure;
608
609	if (tcf_exts_dump(skb, &prog->exts) < 0)
610		goto nla_put_failure;
611
612	if (prog->exts_integrated)
613		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
614	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
615		goto nla_put_failure;
616	if (prog->gen_flags &&
617	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
618		goto nla_put_failure;
619
620	nla_nest_end(skb, nest);
621
622	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
623		goto nla_put_failure;
624
625	return skb->len;
626
627nla_put_failure:
628	nla_nest_cancel(skb, nest);
629	return -1;
630}
631
632static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
633			       void *q, unsigned long base)
634{
635	struct cls_bpf_prog *prog = fh;
636
637	tc_cls_bind_class(classid, cl, q, &prog->res, base);
638}
639
640static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
641			 bool rtnl_held)
642{
643	struct cls_bpf_head *head = rtnl_dereference(tp->root);
644	struct cls_bpf_prog *prog;
645
646	list_for_each_entry(prog, &head->plist, link) {
647		if (!tc_cls_stats_dump(tp, arg, prog))
648			break;
649	}
650}
651
652static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
653			     void *cb_priv, struct netlink_ext_ack *extack)
654{
655	struct cls_bpf_head *head = rtnl_dereference(tp->root);
656	struct tcf_block *block = tp->chain->block;
657	struct tc_cls_bpf_offload cls_bpf = {};
658	struct cls_bpf_prog *prog;
659	int err;
660
661	list_for_each_entry(prog, &head->plist, link) {
662		if (tc_skip_hw(prog->gen_flags))
663			continue;
664
665		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
666					   extack);
667		cls_bpf.command = TC_CLSBPF_OFFLOAD;
668		cls_bpf.exts = &prog->exts;
669		cls_bpf.prog = add ? prog->filter : NULL;
670		cls_bpf.oldprog = add ? NULL : prog->filter;
671		cls_bpf.name = prog->bpf_name;
672		cls_bpf.exts_integrated = prog->exts_integrated;
673
674		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
675					    &cls_bpf, cb_priv, &prog->gen_flags,
676					    &prog->in_hw_count);
677		if (err)
678			return err;
679	}
680
681	return 0;
682}
683
684static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
685	.kind		=	"bpf",
686	.owner		=	THIS_MODULE,
687	.classify	=	cls_bpf_classify,
688	.init		=	cls_bpf_init,
689	.destroy	=	cls_bpf_destroy,
690	.get		=	cls_bpf_get,
691	.change		=	cls_bpf_change,
692	.delete		=	cls_bpf_delete,
693	.walk		=	cls_bpf_walk,
694	.reoffload	=	cls_bpf_reoffload,
695	.dump		=	cls_bpf_dump,
696	.bind_class	=	cls_bpf_bind_class,
697};
698MODULE_ALIAS_NET_CLS("bpf");
699
700static int __init cls_bpf_init_mod(void)
701{
702	return register_tcf_proto_ops(&cls_bpf_ops);
703}
704
705static void __exit cls_bpf_exit_mod(void)
706{
707	unregister_tcf_proto_ops(&cls_bpf_ops);
708}
709
710module_init(cls_bpf_init_mod);
711module_exit(cls_bpf_exit_mod);
net/sched/cls_bpf.c (v4.17)
 
  1/*
  2 * Berkeley Packet Filter based traffic classifier
  3 *
  4 * Might be used to classify traffic through flexible, user-defined and
  5 * possibly JIT-ed BPF filters for traffic control as an alternative to
  6 * ematches.
  7 *
  8 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
  9 *
 10 * This program is free software; you can redistribute it and/or modify
 11 * it under the terms of the GNU General Public License version 2 as
 12 * published by the Free Software Foundation.
 13 */
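
Besides eBPF programs referenced by file descriptor (TCA_BPF_FD), the classifier also accepts classic BPF instructions via TCA_BPF_OPS, handled by cls_bpf_prog_from_ops() below. As a hedged sketch of what userspace hands over, assuming the BPF_STMT() macro from <linux/filter.h> (the array name is illustrative):

	/* Match-all classic BPF program: returning (u32)-1 makes
	 * cls_bpf_classify() fall back to the filter's configured
	 * classid (prog->res).
	 */
	struct sock_filter match_all[] = {
		BPF_STMT(BPF_RET | BPF_K, ~0U),
	};

The kernel copies these ops, wraps them in a struct sock_fprog_kern, and builds the final program with bpf_prog_create(), as seen in cls_bpf_prog_from_ops().
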
 14
 15#include <linux/module.h>
 16#include <linux/types.h>
 17#include <linux/skbuff.h>
 18#include <linux/filter.h>
 19#include <linux/bpf.h>
 20#include <linux/idr.h>
 21
 22#include <net/rtnetlink.h>
 23#include <net/pkt_cls.h>
 24#include <net/sock.h>
 25
 26MODULE_LICENSE("GPL");
 27MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
 28MODULE_DESCRIPTION("TC BPF based classifier");
 29
 30#define CLS_BPF_NAME_LEN	256
 31#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
 32	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
 33
 34struct cls_bpf_head {
 35	struct list_head plist;
 36	struct idr handle_idr;
 37	struct rcu_head rcu;
 38};
 39
 40struct cls_bpf_prog {
 41	struct bpf_prog *filter;
 42	struct list_head link;
 43	struct tcf_result res;
 44	bool exts_integrated;
 45	u32 gen_flags;
 46	struct tcf_exts exts;
 47	u32 handle;
 48	u16 bpf_num_ops;
 49	struct sock_filter *bpf_ops;
 50	const char *bpf_name;
 51	struct tcf_proto *tp;
 52	union {
 53		struct work_struct work;
 54		struct rcu_head rcu;
 55	};
 56};
 57
 58static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
 59	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
 60	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
 61	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
 62	[TCA_BPF_FD]		= { .type = NLA_U32 },
 63	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
 64				    .len = CLS_BPF_NAME_LEN },
 65	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
 66	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
 67				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
 68};
 69
 70static int cls_bpf_exec_opcode(int code)
 71{
 72	switch (code) {
 73	case TC_ACT_OK:
 74	case TC_ACT_SHOT:
 75	case TC_ACT_STOLEN:
 76	case TC_ACT_TRAP:
 77	case TC_ACT_REDIRECT:
 78	case TC_ACT_UNSPEC:
 79		return code;
 80	default:
 81		return TC_ACT_UNSPEC;
 82	}
 83}
 84
 85static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 86			    struct tcf_result *res)
 87{
 88	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
 89	bool at_ingress = skb_at_tc_ingress(skb);
 90	struct cls_bpf_prog *prog;
 91	int ret = -1;
 92
 93	/* Needed here for accessing maps. */
 94	rcu_read_lock();
 95	list_for_each_entry_rcu(prog, &head->plist, link) {
 96		int filter_res;
 97
 98		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
 99
100		if (tc_skip_sw(prog->gen_flags)) {
101			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
102		} else if (at_ingress) {
103			/* It is safe to push/pull even if skb_shared() */
104			__skb_push(skb, skb->mac_len);
105			bpf_compute_data_pointers(skb);
106			filter_res = BPF_PROG_RUN(prog->filter, skb);
107			__skb_pull(skb, skb->mac_len);
108		} else {
109			bpf_compute_data_pointers(skb);
110			filter_res = BPF_PROG_RUN(prog->filter, skb);
111		}
112
113		if (prog->exts_integrated) {
114			res->class   = 0;
115			res->classid = TC_H_MAJ(prog->res.classid) |
116				       qdisc_skb_cb(skb)->tc_classid;
117
118			ret = cls_bpf_exec_opcode(filter_res);
119			if (ret == TC_ACT_UNSPEC)
120				continue;
121			break;
122		}
123
124		if (filter_res == 0)
125			continue;
126		if (filter_res != -1) {
127			res->class   = 0;
128			res->classid = filter_res;
129		} else {
130			*res = prog->res;
131		}
132
133		ret = tcf_exts_exec(skb, &prog->exts, res);
134		if (ret < 0)
135			continue;
136
137		break;
138	}
139	rcu_read_unlock();
140
141	return ret;
142}
143
144static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
145{
146	return !prog->bpf_ops;
147}
148
149static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
150			       struct cls_bpf_prog *oldprog,
151			       struct netlink_ext_ack *extack)
152{
153	struct tcf_block *block = tp->chain->block;
154	struct tc_cls_bpf_offload cls_bpf = {};
155	struct cls_bpf_prog *obj;
156	bool skip_sw;
157	int err;
158
159	skip_sw = prog && tc_skip_sw(prog->gen_flags);
160	obj = prog ?: oldprog;
161
162	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
163				   extack);
164	cls_bpf.command = TC_CLSBPF_OFFLOAD;
165	cls_bpf.exts = &obj->exts;
166	cls_bpf.prog = prog ? prog->filter : NULL;
167	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
168	cls_bpf.name = obj->bpf_name;
169	cls_bpf.exts_integrated = obj->exts_integrated;
170
171	if (oldprog)
172		tcf_block_offload_dec(block, &oldprog->gen_flags);
173
174	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
175	if (prog) {
176		if (err < 0) {
177			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
178			return err;
179		} else if (err > 0) {
180			tcf_block_offload_inc(block, &prog->gen_flags);
181		}
182	}
183
184	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
185		return -EINVAL;
186
187	return 0;
188}
189
190static u32 cls_bpf_flags(u32 flags)
191{
192	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
193}
194
195static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
196			   struct cls_bpf_prog *oldprog,
197			   struct netlink_ext_ack *extack)
198{
199	if (prog && oldprog &&
200	    cls_bpf_flags(prog->gen_flags) !=
201	    cls_bpf_flags(oldprog->gen_flags))
202		return -EINVAL;
203
204	if (prog && tc_skip_hw(prog->gen_flags))
205		prog = NULL;
206	if (oldprog && tc_skip_hw(oldprog->gen_flags))
207		oldprog = NULL;
208	if (!prog && !oldprog)
209		return 0;
210
211	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
212}
213
214static void cls_bpf_stop_offload(struct tcf_proto *tp,
215				 struct cls_bpf_prog *prog,
216				 struct netlink_ext_ack *extack)
217{
218	int err;
219
220	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
221	if (err)
222		pr_err("Stopping hardware offload failed: %d\n", err);
223}
224
225static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
226					 struct cls_bpf_prog *prog)
227{
228	struct tcf_block *block = tp->chain->block;
229	struct tc_cls_bpf_offload cls_bpf = {};
230
231	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
232	cls_bpf.command = TC_CLSBPF_STATS;
233	cls_bpf.exts = &prog->exts;
234	cls_bpf.prog = prog->filter;
235	cls_bpf.name = prog->bpf_name;
236	cls_bpf.exts_integrated = prog->exts_integrated;
237
238	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
239}
240
241static int cls_bpf_init(struct tcf_proto *tp)
242{
243	struct cls_bpf_head *head;
244
245	head = kzalloc(sizeof(*head), GFP_KERNEL);
246	if (head == NULL)
247		return -ENOBUFS;
248
249	INIT_LIST_HEAD_RCU(&head->plist);
250	idr_init(&head->handle_idr);
251	rcu_assign_pointer(tp->root, head);
252
253	return 0;
254}
255
256static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
257{
258	if (cls_bpf_is_ebpf(prog))
259		bpf_prog_put(prog->filter);
260	else
261		bpf_prog_destroy(prog->filter);
262
263	kfree(prog->bpf_name);
264	kfree(prog->bpf_ops);
265}
266
267static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
268{
269	tcf_exts_destroy(&prog->exts);
270	tcf_exts_put_net(&prog->exts);
271
272	cls_bpf_free_parms(prog);
273	kfree(prog);
274}
275
276static void cls_bpf_delete_prog_work(struct work_struct *work)
277{
278	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
279
280	rtnl_lock();
281	__cls_bpf_delete_prog(prog);
282	rtnl_unlock();
283}
284
285static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
286{
287	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
288
289	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
290	tcf_queue_work(&prog->work);
291}
292
293static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
294			     struct netlink_ext_ack *extack)
295{
296	struct cls_bpf_head *head = rtnl_dereference(tp->root);
297
298	idr_remove(&head->handle_idr, prog->handle);
299	cls_bpf_stop_offload(tp, prog, extack);
300	list_del_rcu(&prog->link);
301	tcf_unbind_filter(tp, &prog->res);
302	if (tcf_exts_get_net(&prog->exts))
303		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
304	else
305		__cls_bpf_delete_prog(prog);
306}
307
308static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
309			  struct netlink_ext_ack *extack)
310{
311	struct cls_bpf_head *head = rtnl_dereference(tp->root);
312
313	__cls_bpf_delete(tp, arg, extack);
314	*last = list_empty(&head->plist);
315	return 0;
316}
317
318static void cls_bpf_destroy(struct tcf_proto *tp,
319			    struct netlink_ext_ack *extack)
320{
321	struct cls_bpf_head *head = rtnl_dereference(tp->root);
322	struct cls_bpf_prog *prog, *tmp;
323
324	list_for_each_entry_safe(prog, tmp, &head->plist, link)
325		__cls_bpf_delete(tp, prog, extack);
326
327	idr_destroy(&head->handle_idr);
328	kfree_rcu(head, rcu);
329}
330
331static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
332{
333	struct cls_bpf_head *head = rtnl_dereference(tp->root);
334	struct cls_bpf_prog *prog;
335
336	list_for_each_entry(prog, &head->plist, link) {
337		if (prog->handle == handle)
338			return prog;
339	}
340
341	return NULL;
342}
343
344static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
345{
346	struct sock_filter *bpf_ops;
347	struct sock_fprog_kern fprog_tmp;
348	struct bpf_prog *fp;
349	u16 bpf_size, bpf_num_ops;
350	int ret;
351
352	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
353	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
354		return -EINVAL;
355
356	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
357	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
358		return -EINVAL;
359
360	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
361	if (bpf_ops == NULL)
362		return -ENOMEM;
363
364	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);
365
366	fprog_tmp.len = bpf_num_ops;
367	fprog_tmp.filter = bpf_ops;
368
369	ret = bpf_prog_create(&fp, &fprog_tmp);
370	if (ret < 0) {
371		kfree(bpf_ops);
372		return ret;
373	}
374
375	prog->bpf_ops = bpf_ops;
376	prog->bpf_num_ops = bpf_num_ops;
377	prog->bpf_name = NULL;
378	prog->filter = fp;
379
380	return 0;
381}
382
383static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
384				 u32 gen_flags, const struct tcf_proto *tp)
385{
386	struct bpf_prog *fp;
387	char *name = NULL;
388	bool skip_sw;
389	u32 bpf_fd;
390
391	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
392	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
393
394	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
395	if (IS_ERR(fp))
396		return PTR_ERR(fp);
397
398	if (tb[TCA_BPF_NAME]) {
399		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
400		if (!name) {
401			bpf_prog_put(fp);
402			return -ENOMEM;
403		}
404	}
405
406	prog->bpf_ops = NULL;
407	prog->bpf_name = name;
408	prog->filter = fp;
409
410	if (fp->dst_needed)
411		tcf_block_netif_keep_dst(tp->chain->block);
412
413	return 0;
414}
415
416static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
417			     struct cls_bpf_prog *prog, unsigned long base,
418			     struct nlattr **tb, struct nlattr *est, bool ovr,
419			     struct netlink_ext_ack *extack)
420{
421	bool is_bpf, is_ebpf, have_exts = false;
422	u32 gen_flags = 0;
423	int ret;
424
425	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
426	is_ebpf = tb[TCA_BPF_FD];
427	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
428		return -EINVAL;
429
430	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
431	if (ret < 0)
432		return ret;
433
434	if (tb[TCA_BPF_FLAGS]) {
435		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);
436
437		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
438			return -EINVAL;
439
440		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
441	}
442	if (tb[TCA_BPF_FLAGS_GEN]) {
443		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
444		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
445		    !tc_flags_valid(gen_flags))
446			return -EINVAL;
447	}
448
449	prog->exts_integrated = have_exts;
450	prog->gen_flags = gen_flags;
451
452	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
453		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
454	if (ret < 0)
455		return ret;
456
457	if (tb[TCA_BPF_CLASSID]) {
458		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
459		tcf_bind_filter(tp, &prog->res, base);
460	}
461
462	return 0;
463}
464
465static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
466			  struct tcf_proto *tp, unsigned long base,
467			  u32 handle, struct nlattr **tca,
468			  void **arg, bool ovr, struct netlink_ext_ack *extack)
469{
470	struct cls_bpf_head *head = rtnl_dereference(tp->root);
471	struct cls_bpf_prog *oldprog = *arg;
472	struct nlattr *tb[TCA_BPF_MAX + 1];
473	struct cls_bpf_prog *prog;
474	int ret;
475
476	if (tca[TCA_OPTIONS] == NULL)
477		return -EINVAL;
478
479	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
480			       NULL);
481	if (ret < 0)
482		return ret;
483
484	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
485	if (!prog)
486		return -ENOBUFS;
487
488	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
489	if (ret < 0)
490		goto errout;
491
492	if (oldprog) {
493		if (handle && oldprog->handle != handle) {
494			ret = -EINVAL;
495			goto errout;
496		}
497	}
498
499	if (handle == 0) {
500		handle = 1;
501		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
502				    INT_MAX, GFP_KERNEL);
503	} else if (!oldprog) {
504		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
505				    handle, GFP_KERNEL);
506	}
507
508	if (ret)
509		goto errout;
510	prog->handle = handle;
511
512	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
513				extack);
514	if (ret < 0)
515		goto errout_idr;
516
517	ret = cls_bpf_offload(tp, prog, oldprog, extack);
518	if (ret)
519		goto errout_parms;
520
521	if (!tc_in_hw(prog->gen_flags))
522		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
523
524	if (oldprog) {
525		idr_replace(&head->handle_idr, prog, handle);
526		list_replace_rcu(&oldprog->link, &prog->link);
527		tcf_unbind_filter(tp, &oldprog->res);
528		tcf_exts_get_net(&oldprog->exts);
529		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
530	} else {
531		list_add_rcu(&prog->link, &head->plist);
532	}
533
534	*arg = prog;
535	return 0;
536
537errout_parms:
538	cls_bpf_free_parms(prog);
539errout_idr:
540	if (!oldprog)
541		idr_remove(&head->handle_idr, prog->handle);
542errout:
543	tcf_exts_destroy(&prog->exts);
544	kfree(prog);
545	return ret;
546}
547
548static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
549				 struct sk_buff *skb)
550{
551	struct nlattr *nla;
552
553	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
554		return -EMSGSIZE;
555
556	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
557			  sizeof(struct sock_filter));
558	if (nla == NULL)
559		return -EMSGSIZE;
560
561	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));
562
563	return 0;
564}
565
566static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
567				  struct sk_buff *skb)
568{
569	struct nlattr *nla;
570
571	if (prog->bpf_name &&
572	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
573		return -EMSGSIZE;
574
575	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
576		return -EMSGSIZE;
577
578	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
579	if (nla == NULL)
580		return -EMSGSIZE;
581
582	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
583
584	return 0;
585}
586
587static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
588			struct sk_buff *skb, struct tcmsg *tm)
589{
590	struct cls_bpf_prog *prog = fh;
591	struct nlattr *nest;
592	u32 bpf_flags = 0;
593	int ret;
594
595	if (prog == NULL)
596		return skb->len;
597
598	tm->tcm_handle = prog->handle;
599
600	cls_bpf_offload_update_stats(tp, prog);
601
602	nest = nla_nest_start(skb, TCA_OPTIONS);
603	if (nest == NULL)
604		goto nla_put_failure;
605
606	if (prog->res.classid &&
607	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
608		goto nla_put_failure;
609
610	if (cls_bpf_is_ebpf(prog))
611		ret = cls_bpf_dump_ebpf_info(prog, skb);
612	else
613		ret = cls_bpf_dump_bpf_info(prog, skb);
614	if (ret)
615		goto nla_put_failure;
616
617	if (tcf_exts_dump(skb, &prog->exts) < 0)
618		goto nla_put_failure;
619
620	if (prog->exts_integrated)
621		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
622	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
623		goto nla_put_failure;
624	if (prog->gen_flags &&
625	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
626		goto nla_put_failure;
627
628	nla_nest_end(skb, nest);
629
630	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
631		goto nla_put_failure;
632
633	return skb->len;
634
635nla_put_failure:
636	nla_nest_cancel(skb, nest);
637	return -1;
638}
639
640static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
641{
642	struct cls_bpf_prog *prog = fh;
643
644	if (prog && prog->res.classid == classid)
645		prog->res.class = cl;
646}
647
648static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
649{
650	struct cls_bpf_head *head = rtnl_dereference(tp->root);
651	struct cls_bpf_prog *prog;
652
653	list_for_each_entry(prog, &head->plist, link) {
654		if (arg->count < arg->skip)
655			goto skip;
656		if (arg->fn(tp, prog, arg) < 0) {
657			arg->stop = 1;
658			break;
659		}
660skip:
661		arg->count++;
662	}
663}
664
665static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
666	.kind		=	"bpf",
667	.owner		=	THIS_MODULE,
668	.classify	=	cls_bpf_classify,
669	.init		=	cls_bpf_init,
670	.destroy	=	cls_bpf_destroy,
671	.get		=	cls_bpf_get,
672	.change		=	cls_bpf_change,
673	.delete		=	cls_bpf_delete,
674	.walk		=	cls_bpf_walk,
675	.dump		=	cls_bpf_dump,
676	.bind_class	=	cls_bpf_bind_class,
677};
678
679static int __init cls_bpf_init_mod(void)
680{
681	return register_tcf_proto_ops(&cls_bpf_ops);
682}
683
684static void __exit cls_bpf_exit_mod(void)
685{
686	unregister_tcf_proto_ops(&cls_bpf_ops);
687}
688
689module_init(cls_bpf_init_mod);
690module_exit(cls_bpf_exit_mod);