// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */

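/*
 * Example configuration (an illustrative sketch, assuming iproute2's "flow"
 * filter syntax; the device, handles and parameters are made-up values):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 flow \
 *		hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 baseclass 1:1 perturb 15
 *
 * This hashes each packet's 5-tuple into one of 1024 classes under
 * baseclass 1:1, re-seeding the hash every 15 seconds (FLOW_MODE_HASH
 * with TCA_FLOW_PERTURB, implemented below).
 */
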
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};
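
/*
 * Each filter turns a packet into an array of u32 keys and maps it to a
 * class ID in one of two modes (see flow_classify()):
 *
 *	FLOW_MODE_MAP:	classid = (((keys[0] & mask) ^ xor) >> rshift) + addend
 *	FLOW_MODE_HASH:	classid = jhash2(keys, nkeys, hashrnd)
 *
 * optionally followed by "classid %= divisor", with the result used as an
 * offset from baseclass.
 */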

/* Fold a kernel pointer into a 32-bit value usable as a flow key. */
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
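
/*
 * CTTUPLE() is a statement expression evaluating to the given member of
 * the packet's conntrack tuple.  If the skb carries no conntrack state
 * (or conntrack is compiled out), it jumps to a "fallback:" label that
 * every caller must provide, where the non-conntrack key is used instead.
 */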

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |		\
			  (1 << FLOW_KEY_PROTO) |	\
			  (1 << FLOW_KEY_PROTO_SRC) |	\
			  (1 << FLOW_KEY_PROTO_DST) |	\
			  (1 << FLOW_KEY_NFCT_SRC) |	\
			  (1 << FLOW_KEY_NFCT_DST) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
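
/*
 * Keys in FLOW_KEYS_NEEDED are derived from the dissected flow;
 * flow_classify() runs the flow dissector only when at least one of
 * them appears in a filter's keymask.
 */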

static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
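
/*
 * Worked FLOW_MODE_MAP example (purely illustrative values): with key
 * "mark", mask 0xff, xor 0, rshift 4 and addend 1, a packet with
 * skb->mark == 0x2a yields ((0x2a & 0xff) >> 4) + 1 = 3, i.e. the third
 * minor class after baseclass.
 */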

/* Re-seed the hash at each perturbation interval to re-balance flows. */
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}
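
/*
 * Filters are freed through tcf_queue_work() so that an RCU grace period
 * has elapsed and no reader still walks the filter list; the work item
 * then re-takes the RTNL lock because __flow_destroy_filter() tears down
 * the extensions and the perturbation timer.
 */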

static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* keymask is a bitmask, so test the key bits rather than
		 * the raw FLOW_KEY_* index values.
		 */
		if ((keymask & ((1 << FLOW_KEY_SKUID) | (1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
				true, extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		/* Fill in parts missing from baseclass: the attached
		 * qdisc's handle supplies the major number, minor 1 is
		 * the default.
		 */
		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");