// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

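/* Sanitize the verdict returned by a direct-action (exts_integrated)
 * program: pass through the TC opcodes we understand and map anything
 * else to TC_ACT_UNSPEC, which makes the classify loop move on to the
 * next filter in the list.
 */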
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

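/* Main classification path: walk the RCU-protected filter list and run
 * each BPF program on the skb.  At ingress the MAC header has already
 * been pulled, so it is pushed back temporarily to give the program the
 * same view of the packet as at egress.  Filters marked skip_sw are not
 * executed here and behave as a miss in software, since their result is
 * produced by the hardware offload.
 */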
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

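/* Issue a single offload command to the drivers bound to this block.
 * Depending on which of prog/oldprog is set, this is an add, a replace
 * or a destroy.  If an add/replace fails, the call is replayed with the
 * arguments swapped to roll the hardware back to the old program.
 */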
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

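/* Decide whether an offload command is needed at all: a replace must
 * not change the skip_sw/skip_hw flags, and programs marked skip_hw
 * are never sent to the drivers.
 */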
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

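/* Unlink a filter and free it.  If a reference on the owning netns can
 * still be taken, freeing is deferred via tcf_queue_work() until after
 * an RCU grace period; otherwise the netns is going away and the
 * program is freed synchronously.
 */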
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

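/* Load a classic BPF program supplied inline as an array of
 * sock_filter instructions (TCA_BPF_OPS/TCA_BPF_OPS_LEN) and convert
 * it into an internal bpf_prog via bpf_prog_create().
 */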
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

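/* Take a reference on an already loaded eBPF program identified by the
 * file descriptor in TCA_BPF_FD.  For skip_sw filters, programs bound
 * to the offload device are also accepted.
 */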
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

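/* Validate and apply the netlink attributes for a new or replaced
 * filter: exactly one of classic (TCA_BPF_OPS) or extended (TCA_BPF_FD)
 * must be given, the flags are checked, the program is loaded, and an
 * optional classid is bound.
 */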
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

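/* Create or replace a filter.  A new handle is allocated from the IDR
 * when none was given; on replace, the new program takes the old one's
 * place in both the IDR and the RCU list, and the old program is freed
 * after a grace period.
 */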
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

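/* Dump one filter back to user space, refreshing hardware stats first
 * so that action counters include packets handled by the offload.
 */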
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &prog->res, base);
		else
			__tcf_unbind_filter(q, &prog->res);
	}
}

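/* Iterate over all filters for dump/walk operations, honouring the
 * caller's skip/count cursor.
 */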
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

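/* Replay add/delete of all offloadable filters towards a single driver
 * callback, used when a block is bound to or unbound from a device
 * after filters have already been installed.
 */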
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);