net/sched/cls_bpf.c (Linux v5.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */
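/*
 * Editor's sketch for orientation, not part of the original source: a
 * minimal direct-action classifier of the kind this module loads and
 * runs per packet could look as follows. File, section and device names
 * are illustrative; the object is assumed to be built with
 * "clang -O2 -target bpf -c cls_example.c -o cls_example.o" and attached
 * through the iproute2 front end, e.g.:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf direct-action \
 *		obj cls_example.o sec classifier
 *
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("classifier"), used))
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_OK;	// let the packet continue
 *	}
 *
 *	char __license[] __attribute__((section("license"), used)) = "GPL";
 */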

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}
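/*
 * Editor's note on the conventions above (hedged summary of the loop in
 * cls_bpf_classify()): in the non-direct-action path a program's return
 * value is interpreted as
 *
 *	return  0;	   no match, fall through to the next filter
 *	return -1;	   match, use the classid given via TCA_BPF_CLASSID
 *	return 0x10001;	   match, classify straight into e.g. 1:1
 *
 * whereas with TCA_BPF_FLAG_ACT_DIRECT the return value is a TC_ACT_*
 * opcode, filtered through cls_bpf_exec_opcode() above.
 */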

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
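/*
 * Editor's illustration, not from the original file: the classic-BPF
 * path above consumes an array of struct sock_filter via TCA_BPF_OPS.
 * The smallest valid program is a single unconditional return whose
 * non-zero value signals a match; 0xffffffff reads back as -1, i.e.
 * "use the classid configured via TCA_BPF_CLASSID":
 *
 *	struct sock_filter accept_all[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// match everything
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= 1,		// number of instructions
 *		.filter	= accept_all,
 *	};
 *
 * which is exactly the shape handed to bpf_prog_create() above.
 */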

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
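
The same file as of v4.10.11 follows for comparison. Relative to it, the v5.4 code above replaces the hand-rolled handle allocator (cls_bpf_grab_new_handle() below) with an IDR, drives hardware offload through the tc block callback API (tc_setup_cb_add/replace/destroy) rather than calling ndo_setup_tc directly, and defers filter freeing to a sleepable rcu_work instead of call_rcu(). A minimal sketch of the IDR pattern v5.4 relies on, with illustrative names that appear in neither listing:

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);

	static int example_alloc_handle(void *prog, u32 *handle)
	{
		*handle = 1;	/* lowest acceptable id */
		/* on success, the first free id in [1, INT_MAX] is stored
		 * in *handle and prog is bound to it
		 */
		return idr_alloc_u32(&example_idr, prog, handle, INT_MAX,
				     GFP_KERNEL);
	}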
net/sched/cls_bpf.c (Linux v4.10.11)
 
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}
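/*
 * Editor's sketch of the driver side, hypothetical and not part of this
 * file: in this 4.10-era API the classifier invokes ndo_setup_tc()
 * directly, so a driver offering cls_bpf offload would dispatch on the
 * setup type, roughly:
 *
 *	static int example_ndo_setup_tc(struct net_device *dev, u32 handle,
 *					__be16 protocol,
 *					struct tc_to_netdev *tc)
 *	{
 *		if (tc->type != TC_SETUP_CLSBPF)
 *			return -EOPNOTSUPP;
 *		// program tc->cls_bpf->prog into hardware, honouring
 *		// tc->cls_bpf->command (ADD/REPLACE/DESTROY/STATS)
 *		return 0;
 *	}
 */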

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);