// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */

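/*
 * The "flow" classifier maps packets to classes by hashing or mapping a
 * configurable set of flow keys (addresses, ports, marks, conntrack data,
 * socket credentials, ...).  Illustrative iproute2 usage, with syntax
 * assumed from tc-flow(8) rather than taken from this file:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 1 \
 *		flow hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 baseclass 1:1 perturb 60
 *
 * which spreads flows over 1024 classes starting at 1:1 and re-seeds the
 * hash every 60 seconds.
 */
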
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>
#include <net/tc_wrapper.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};

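/*
 * Fold a kernel pointer into a 32-bit value usable as a hash input by
 * XORing its upper and lower halves.  Used as a fallback key when the
 * packet carries no dissectable address or port information, so that
 * e.g. the owning socket or the dst entry still distinguishes flows.
 */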
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

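/*
 * CTTUPLE() is a statement expression that evaluates to one member of the
 * conntrack tuple for the skb's direction.  If the skb has no conntrack
 * entry (or conntrack is compiled out), it jumps to the caller's local
 * "fallback" label instead of yielding a value, so every user below must
 * provide that label.
 */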
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

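/*
 * The SKUID/SKGID keys classify on the credentials the owning socket's
 * file was opened with.  skb_to_full_sk() is needed because skb->sk may
 * point at a request or timewait minisock that carries no socket/file.
 */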
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 tag;

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \
			  (1 << FLOW_KEY_DST) | \
			  (1 << FLOW_KEY_PROTO) | \
			  (1 << FLOW_KEY_PROTO_SRC) | \
			  (1 << FLOW_KEY_PROTO_DST) | \
			  (1 << FLOW_KEY_NFCT_SRC) | \
			  (1 << FLOW_KEY_NFCT_DST) | \
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) | \
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

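/*
 * Classification walks the filter list under RCU.  For each filter whose
 * ematches accept the packet, the configured keys are extracted (the flow
 * dissector runs only if a key from FLOW_KEYS_NEEDED is requested) and
 * reduced to a class minor id:
 *
 *   hash mode:  classid = jhash2(keys, nkeys, hashrnd)
 *   map mode:   classid = (((key & mask) ^ xor) >> rshift) + addend
 *
 * followed by an optional "classid %= divisor" and finally
 * res->classid = TC_H_MAKE(baseclass, baseclass + classid).
 */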
TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb,
				    const struct tcf_proto *tp,
				    struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

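/*
 * When a perturb period is configured in hash mode, the jhash seed is
 * re-randomized periodically so no flow stays permanently stuck in an
 * unlucky bucket; the cost is that flows may migrate between classes at
 * each re-seed.
 */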
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

static void __flow_destroy_filter(struct flow_filter *f)
{
	timer_shutdown_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

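/*
 * flow_change() implements both create and replace: it always builds a
 * complete new filter (fnew), copying the old filter's settings first
 * when one exists, then publishes it with list_add_tail_rcu() or
 * list_replace_rcu().  The old filter is freed via tcf_queue_work() so
 * concurrent RCU readers in flow_classify() never see a torn update.
 */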
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
				extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

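	/*
	 * TIMER_DEFERRABLE: re-seeding the hash is not urgent, so let the
	 * timer ride along with the next non-deferrable wakeup instead of
	 * waking an idle CPU just to call get_random_bytes().
	 */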
	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

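/*
 * Deletion and teardown follow the usual RCU pattern: unlink with
 * list_del_rcu() so in-flight readers still see a valid filter, then
 * defer the actual free to flow_destroy_filter_work(), which runs after
 * a grace period and takes the RTNL lock.
 */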
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

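/*
 * flow_dump() mirrors flow_change(): it emits the filter's configuration
 * as a nested TCA_OPTIONS attribute, skipping attributes that still hold
 * their defaults (mask ~0, xor/rshift/addend/divisor/baseclass 0) to
 * keep the dump minimal.
 */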
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (!tc_cls_stats_dump(tp, arg, f))
			break;
	}
}

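/*
 * Module glue: the ops table registers the classifier under the "flow"
 * kind; the TC core calls flow_init() when a filter of this kind is
 * first attached to a qdisc.
 */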
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");