// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */

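/*
 * Illustrative iproute2 usage (a sketch only; device, handle and class
 * names are examples, not taken from this file):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *           flow hash keys src,dst,proto-src,proto-dst \
 *           divisor 1024 perturb 10 baseclass 1:1
 *
 * This hashes each packet's addresses and ports onto one of 1024 class
 * ids starting at the baseclass, re-randomizing the hash seed every
 * 10 seconds.
 */
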
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>
#include <net/tc_wrapper.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};

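/* Fold a kernel pointer into a u32 by XORing its upper and lower halves. */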
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

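/*
 * Extract one member of the conntrack tuple for the packet's direction.
 * Jumps to the caller's local "fallback" label when no conntrack entry
 * is attached, or when conntrack is compiled out.
 */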
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 tag;

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

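/* Keys whose getters consume the flow dissector output. */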
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

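/*
 * Main classification path: walk the filter list under RCU, dissect the
 * packet once if any selected key needs it, then map the collected key
 * values to a class id, either by jhash (FLOW_MODE_HASH) or by the
 * mask/xor/rshift/addend transform (FLOW_MODE_MAP).
 */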
TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb,
				    const struct tcf_proto *tp,
				    struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

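/* Deferrable timer: periodically re-seed the hash to reshuffle flows. */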
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= NLA_POLICY_MAX(NLA_U32,
						 31 /* BITS_PER_U32 - 1 */),
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

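/* Common teardown: stop the perturbation timer and release the filter's
 * extensions and ematch tree before freeing it.
 */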
static void __flow_destroy_filter(struct flow_filter *f)
{
	timer_shutdown_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

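/*
 * Create or replace a filter.  A replacement is built as a full copy of
 * the old filter with the changed attributes applied, then swapped into
 * the list via RCU; the old filter is freed from a workqueue after the
 * grace period.
 */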
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
				extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

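/* Unlink one filter; destruction is deferred past the RCU grace period. */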
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

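/* Tear down all filters when the tcf_proto itself is destroyed. */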
static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

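/* Dump one filter's configuration back to userspace as netlink attributes. */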
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (!tc_cls_stats_dump(tp, arg, f))
			break;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("flow");

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");