// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed to hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough. And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
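
/* Illustrative userspace sketch (not part of this file): the scheme
 * above maps onto tc(8) roughly as follows, assuming a device eth0
 * with a classful qdisc at handle 1: -- build a 256-bucket table,
 * hash on the last octet of the IPv4 source address, and place keys
 * in individual buckets:
 *
 *	tc filter add dev eth0 parent 1: prio 5 handle 2: \
 *		protocol ip u32 divisor 256
 *	tc filter add dev eth0 parent 1: prio 5 protocol ip u32 \
 *		ht 800:: match ip src 10.0.0.0/8 \
 *		hashkey mask 0x000000ff at 12 link 2:
 *	tc filter add dev eth0 parent 1: prio 5 protocol ip u32 \
 *		ht 2:6b: match ip src 10.0.0.107/32 flowid 1:10
 */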

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
#include <net/tc_wrapper.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

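/* Fold a 32bit key into a bucket index: mask out the selected field and
 * shift it down so its least significant bit lands at bit 0 (fshift is
 * precomputed as ffs(ntohl(hmask)) - 1 in u32_change()).
 */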
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb,
				   const struct tcf_proto *tp,
				   struct tcf_result *res)
{
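	/* Manual stack: descents into linked hash tables are bounded by
	 * TC_U32_MAXDEPTH instead of using recursion.
	 */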
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

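/* Handle layout (see the TC_U32_* macros): bits 31..20 hold the hash
 * table id, bits 19..12 the bucket within that table, and bits 11..0
 * the key node id; TC_U32_KEY() is the low 20 bits (bucket + node).
 */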
static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

/* Protected by rtnl lock */
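/* Allocate a cyclic table id below 0x800, set the 0x800 bit, and shift
 * it into the top 12 bits of the handle (the TC_U32_HTID() field).
 */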
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

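/* A tc_u_common is shared by all tcf_proto instances that attach to the
 * same qdisc (or, for shared blocks, the same block); instances are kept
 * in a global hash table keyed by that qdisc/block pointer.
 */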
static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

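	/* Second reference: one is held by tp_c->hlist above, one by
	 * tp->root below.
	 */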
	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}

/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the deferred variant that frees
 * the entire structure including the statistics percpu variables.
 * Only use this if the key is not a copy returned by
 * u32_init_knode(). See u32_delete_key_work() for the variant that
 * should be used with keys returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

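	/* Drop the tp->root reference; the tp_c->hlist link still holds
	 * one, so the root hnode is destroyed when the count falls to 1.
	 */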
	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}

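/* Pick a free node id within htid: prefer the 0x800-0xFFF range, fall
 * back to the low range, and as a last resort reuse the maximum id.
 */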
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, u32 flags,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, userflags = 0;
	size_t sel_size;
	int err;

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(userflags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ userflags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], flags, new->flags,
				    extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
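		/* The divisor is stored as a mask (bucket count - 1),
		 * capping tables at 256 buckets.
		 */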
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	unsafe_memcpy(&n->sel, s, sel_size,
		      /* A composite flex-array structure destination,
		       * which was correctly sized with struct_size(),
		       * bounds-checked against nla_len(), and allocated
		       * above. */);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = userflags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		if (!tc_cls_stats_dump(tp, arg, ht))
			return;

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (!tc_cls_stats_dump(tp, arg, n))
					return;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	tc_cls_bind_class(classid, cl, q, &n->res, base);
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
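		/* Fold the per-CPU counters into a single struct for the
		 * dump.
		 */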
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");