/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
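
/* A rough sketch of the resulting layout (illustrative only):
 *
 *   tc_u_common
 *     hlist -> tc_u_hnode (root, divisor 0)
 *                ht[0] -> tc_u_knode -> tc_u_knode -> ...
 *                              |ht_down
 *                              v
 *                          tc_u_hnode (divisor N)
 *                              ht[sel] -> tc_u_knode -> ...
 *
 * Each knode carries an array of 32bit key/mask pairs (tc_u32_sel)
 * matched against packet data; a knode either terminates with a
 * classification result or links down to a next-level hash table.
 */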

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/netdevice.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct rcu_head		rcu;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
	struct rcu_head		rcu;
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
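
/* Worked example (illustrative): with sel->hmask = htonl(0x0000ff00) and
 * fshift = 8 (the index of the lowest set bit in the host-order mask), a
 * key of htonl(0x00003400) folds to (0x00003400 & 0x0000ff00) >> 8 = 0x34;
 * the caller then reduces this modulo the table size by ANDing with
 * ht->divisor.
 */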

static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
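
/* Walkthrough (illustrative): in a typical two-level IPv4 setup a root
 * knode matches, say, the protocol byte and carries ht_down pointing at a
 * second-level table. u32_classify() then PUSHes the current node and
 * offset, optionally advances 'off' past the variable-length IP header
 * via TC_U32_VAROFFSET (reading the IHL field through sel.offoff/offmask/
 * offshift), and resumes matching TCP/UDP words in the lower table. A
 * branch that fails to match POPs back and continues at the saved node.
 */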

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	/* hgenerator is only used under the rtnl lock, so it is safe to
	 * increment it without read-copy-update semantics.
	 */
	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
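
/* Handles generated here and below encode htid(12b):bucket(8b):nodeid(12b);
 * e.g. (illustrative) handle 0x80012345 addresses node 0x345 in bucket 0x12
 * of hash table 0x800. Since gen_new_htid() always ORs in 0x800 before
 * shifting, generated table ids always have the top bit set.
 */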

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp,
			   struct tc_u_knode *n,
			   bool free_pf)
{
	tcf_exts_destroy(&n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, false);
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);

	u32_destroy_key(key->tp, key, true);
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, 0)) {
		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
		offload.cls_u32->knode.handle = handle;
		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}

static void u32_replace_hw_hnode(struct tcf_proto *tp,
				 struct tc_u_hnode *h,
				 u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, flags)) {
		offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
		offload.cls_u32->hnode.divisor = h->divisor;
		offload.cls_u32->hnode.handle = h->handle;
		offload.cls_u32->hnode.prio = h->prio;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, 0)) {
		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
		offload.cls_u32->hnode.divisor = h->divisor;
		offload.cls_u32->hnode.handle = h->handle;
		offload.cls_u32->hnode.prio = h->prio;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}

static void u32_replace_hw_knode(struct tcf_proto *tp,
				 struct tc_u_knode *n,
				 u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload u32_offload = {0};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSU32;
	offload.cls_u32 = &u32_offload;

	if (tc_should_offload(dev, flags)) {
		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
		offload.cls_u32->knode.handle = n->handle;
		offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		offload.cls_u32->knode.val = n->val;
		offload.cls_u32->knode.mask = n->mask;
#else
		offload.cls_u32->knode.val = 0;
		offload.cls_u32->knode.mask = 0;
#endif
		offload.cls_u32->knode.sel = &n->sel;
		offload.cls_u32->knode.exts = &n->exts;
		if (n->ht_down)
			offload.cls_u32->knode.link_handle = n->ht_down->handle;

		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					      tp->protocol, &offload);
	}
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n->handle);
			call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}

static bool u32_destroy(struct tcf_proto *tp, bool force)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (!force) {
		if (root_ht) {
			if (root_ht->refcnt > 1)
				return false;
			if (root_ht->refcnt == 1) {
				if (!ht_empty(root_ht))
					return false;
			}
		}

		if (tp_c->refcnt > 1)
			return false;

		if (tp_c->refcnt == 1) {
			struct tc_u_hnode *ht;

			for (ht = rtnl_dereference(tp_c->hlist);
			     ht;
			     ht = rtnl_dereference(ht->next))
				if (!ht_empty(ht))
					return false;
		}
	}

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next)) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			RCU_INIT_POINTER(tp_c->hlist, ht->next);
			kfree_rcu(ht, rcu);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
	return true;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, ht->handle);
		return u32_delete_key(tp, (struct tc_u_knode *)ht);
	}

	if (root_ht == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
	     n;
	     n = rtnl_dereference(n->next))
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
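
/* Example (illustrative): for htid 0x80100000 with nodes 0x800 and 0x801
 * already present in the target bucket, the bitmap scan above skips them
 * and yields 0x80100802; only once nodeids 0x800..0xFFF are exhausted does
 * the search wrap around to the low range starting at nodeid 1.
 */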

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};
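
/* The attributes above correspond to the iproute2 "u32" syntax; a typical
 * invocation (illustrative) such as
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 10 u32 \
 *           match ip dst 10.0.0.0/8 flowid 1:2
 *
 * arrives here as TCA_U32_SEL carrying one key (val/mask/off selecting the
 * IP destination word) plus TCA_U32_CLASSID carrying classid 1:2.
 */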

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

static void u32_replace_knode(struct tcf_proto *tp,
			      struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_knode *new;
	struct tc_u32_sel *s = &n->sel;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, n->ht_down);

	/* bump reference count as long as we hold pointer to structure */
	if (new->ht_down)
		new->ht_down->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE);

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS])
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);

	n = (struct tc_u_knode *)*arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		if (n->flags != flags)
			return -EINVAL;

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		call_rcu(&n->rcu, u32_delete_key_rcu);
		u32_replace_hw_knode(tp, new, flags);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = (unsigned long)ht;

		u32_replace_hw_hnode(tp, ht, flags);
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	n->tp = tp;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		u32_replace_hw_knode(tp, n, flags);
		*arg = (unsigned long)n;
		return 0;
	}

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
errout:
#endif

#ifdef CONFIG_CLS_U32_PERF
	free_percpu(n->pf);
#endif
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    gpf)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.dump		= u32_dump,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough. And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>
#include <net/tc_wrapper.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	refcount_t		refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	refcount_t		refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

static u32 handle2id(u32 h)
{
	return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h);
}

static u32 id2handle(u32 id)
{
	return (id | 0x800U) << 20;
}
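
/* Worked example (illustrative): idr id 1 maps to handle
 * id2handle(1) = (1 | 0x800) << 20 = 0x80100000, and
 * handle2id(0x80100000) = (0x80100000 >> 20) & 0x7FF = 1 recovers it.
 * id 0 is the distinguished root table, id2handle(0) == 0x80000000.
 */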

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb,
				   const struct tcf_proto *tp,
				   struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return id2handle(id);
}
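
/* Note: idr_alloc_cyclic() both picks the next free id in [1, 0x7FF) and
 * stores the hnode pointer under it, replacing the open-coded hgenerator
 * scan used by older versions of this classifier; the matching
 * idr_remove() maps a handle back to its idr slot via handle2id().
 */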

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}
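
/* Sketch (illustrative): two tcf_proto instances attached to the same
 * qdisc, or to the same shared block, resolve to the same key above and
 * therefore share one tc_u_common; that sharing is what allows u32
 * filters created under different priorities to link to each other's
 * hash tables.
 */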

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	refcount_set(&root_ht->refcnt, 1);
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0);
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		refcount_set(&tp_c->refcnt, 1);
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	} else {
		refcount_inc(&tp_c->refcnt);
	}

	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	/* root_ht must be destroyed when tcf_proto is destroyed */
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && refcount_dec_and_test(&ht->refcnt))
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, handle2id(ht->handle));
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && refcount_dec_and_test(&root_ht->refcnt))
		u32_destroy_hnode(tp, root_ht, extack);

	if (refcount_dec_and_test(&tp_c->refcnt)) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (refcount_dec_and_test(&ht->refcnt))
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (refcount_dec_if_one(&ht->refcnt)) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = refcount_read(&tp_c->refcnt) == 1 && tp_c->knodes == 0;
	return ret;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}
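
/* Example (illustrative): for htid 0x80100000 the first pass tries to
 * reserve a nodeid in [0x80100800, 0x80100FFF]; if that range is full the
 * second pass retries from htid + 1, and as a last resort 'max' (nodeid
 * 0xFFF) is returned even though it could not be reserved.
 */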

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			      struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID])
		tcf_unbind_filter(tp, &n->res);
}

static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			    unsigned long base, struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}
}

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err, ifindex = -1;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_INDEV]) {
		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ifindex < 0)
			return -EINVAL;
	}

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			refcount_inc(&ht_down->refcnt);
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			refcount_dec(&ht_old->refcnt);
	}

	if (ifindex >= 0)
		n->ifindex = ifindex;

	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
847 new->pf = n->pf;
848#endif
849
850#ifdef CONFIG_CLS_U32_MARK
851 new->val = n->val;
852 new->mask = n->mask;
853 /* Similarly success statistics must be moved as pointers */
854 new->pcpu_success = n->pcpu_success;
855#endif
856 memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));
857
858 if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
859 kfree(new);
860 return NULL;
861 }
862
863 /* bump reference count as long as we hold pointer to structure */
864 if (ht)
865 refcount_inc(&ht->refcnt);
866
867 return new;
868}
869
870static int u32_change(struct net *net, struct sk_buff *in_skb,
871 struct tcf_proto *tp, unsigned long base, u32 handle,
872 struct nlattr **tca, void **arg, u32 flags,
873 struct netlink_ext_ack *extack)
874{
875 struct tc_u_common *tp_c = tp->data;
876 struct tc_u_hnode *ht;
877 struct tc_u_knode *n;
878 struct tc_u32_sel *s;
879 struct nlattr *opt = tca[TCA_OPTIONS];
880 struct nlattr *tb[TCA_U32_MAX + 1];
881 u32 htid, userflags = 0;
882 size_t sel_size;
883 int err;
884
885 if (!opt) {
886 if (handle) {
887 NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
888 return -EINVAL;
889 } else {
890 return 0;
891 }
892 }
893
894 err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
895 extack);
896 if (err < 0)
897 return err;
898
899 if (tb[TCA_U32_FLAGS]) {
900 userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
901 if (!tc_flags_valid(userflags)) {
902 NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
903 return -EINVAL;
904 }
905 }
906
907 n = *arg;
908 if (n) {
909 struct tc_u_knode *new;
910
911 if (TC_U32_KEY(n->handle) == 0) {
912 NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
913 return -EINVAL;
914 }
915
916 if ((n->flags ^ userflags) &
917 ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
918 NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
919 return -EINVAL;
920 }
921
922 new = u32_init_knode(net, tp, n);
923 if (!new)
924 return -ENOMEM;
925
926 err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
927 flags, new->flags, extack);
928
929 if (err) {
930 __u32_destroy_key(new);
931 return err;
932 }
933
934 u32_bind_filter(tp, new, base, tb);
935
936 err = u32_replace_hw_knode(tp, new, flags, extack);
937 if (err) {
938 u32_unbind_filter(tp, new, tb);
939
940 if (tb[TCA_U32_LINK]) {
941 struct tc_u_hnode *ht_old;
942
943 ht_old = rtnl_dereference(n->ht_down);
944 if (ht_old)
945 refcount_inc(&ht_old->refcnt);
946 }
947 __u32_destroy_key(new);
948 return err;
949 }
950
951 if (!tc_in_hw(new->flags))
952 new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
953
954 tcf_proto_update_usesw(tp, new->flags);
955
956 u32_replace_knode(tp, tp_c, new);
957 tcf_unbind_filter(tp, &n->res);
958 tcf_exts_get_net(&n->exts);
959 tcf_queue_work(&n->rwork, u32_delete_key_work);
960 return 0;
961 }
962
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		refcount_set(&ht->refcnt, 1);
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle2id(handle));
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	/* At this point, we need to derive the new handle that will be used to
	 * uniquely map the identity of this table match entry. The
	 * identity of the entry that we need to construct is 32 bits made of:
	 *     htid(12b):bucketid(8b):node/entryid(12b)
	 *
	 * At this point _we have the table(ht)_ in which we will insert this
	 * entry. We carry the table's id in variable "htid".
	 * Note that earlier code picked the ht either a) from the htid the
	 * user provided via the TCA_U32_HASH attribute, or b) when no such
	 * attribute was passed, from the root ht, whose default ID is
	 * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0.
	 * If OTOH the user passed us the htid, they may also pass a bucketid
	 * of choice; 0 is fine. For example, a user htid of 0x[600][01][000]
	 * indicates hash bucketid 1. Rule: the entry/node ID _cannot_ be
	 * passed via the htid, so even if it was non-zero it will be ignored.
	 *
	 * We may also have a handle, if the user passed one. The handle
	 * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b).
	 * Rule: the bucketid on the handle is ignored even if one was passed;
	 * rather the value on "htid" is always assumed to be the bucketid.
	 */
	if (handle) {
		/* Rule: The htid from handle and tableid from htid must match */
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		/* Ok, so far we have a valid htid(12b):bucketid(8b) but we
		 * need to finalize the table entry identification with the last
		 * part - the node/entryid(12b). Rule: Nodeid _cannot be 0_ for
		 * entries. Rule: nodeid of 0 is reserved only for tables (see
		 * earlier code which processes the TC_U32_DIVISOR attribute).
		 * Rule: The nodeid can only be derived from the handle (and not
		 * htid).
		 * Rule: if the handle specified zero for the node id, e.g.
		 * 0x60000000, then pick a new nodeid from the pool of IDs
		 * this hash table has been allocating from.
		 * If OTOH it is specified (e.g. the user passed a handle such
		 * as 0x60000123), then we use it to generate our final handle,
		 * which is used to uniquely identify the match entry.
		 */
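		/* Worked example: with htid 0x60001000 (table 0x600, bucket 1)
		 * and a user handle of 0x60000123, the final handle is
		 * htid | TC_U32_NODE(handle) = 0x60001000 | 0x123 = 0x60001123.
		 */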
		if (!TC_U32_NODE(handle)) {
			handle = gen_new_kid(ht, htid);
		} else {
			handle = htid | TC_U32_NODE(handle);
			err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
					    handle, GFP_KERNEL);
			if (err)
				return err;
		}
	} else {
		/* The user did not give us a handle; let's just generate one
		 * from the table's pool of nodeids.
		 */
		handle = gen_new_kid(ht, htid);
	}

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

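	/* The selector ends in a flexible array of s->nkeys key/mask/offset
	 * tuples; make sure the netlink attribute really carries that many
	 * keys before sizing allocations and copying from it.
	 */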
	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	unsafe_memcpy(&n->sel, s, sel_size,
		      /* A composite flex-array structure destination,
		       * which was correctly sized with struct_size(),
		       * bounds-checked against nla_len(), and allocated
		       * above. */);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
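	/* fshift is the number of trailing zero bits in hmask, so that
	 * u32_hash_fold() right-aligns the masked hash key before the
	 * divisor mask is applied.
	 */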
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = userflags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);

	u32_bind_filter(tp, n, base, tb);

	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errunbind;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		tcf_proto_update_usesw(tp, n->flags);

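		/* Link the node into its bucket's chain, which is kept
		 * sorted by ascending node id: insert in front of the first
		 * entry whose TC_U32_NODE() is larger than ours.
		 */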
		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errunbind:
	u32_unbind_filter(tp, n, tb);

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		if (!tc_cls_stats_dump(tp, arg, ht))
			return;

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (!tc_cls_stats_dump(tp, arg, n))
					return;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
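	/* A callback error is fatal only when adding a table that must live
	 * in hardware (skip_sw); in every other case the software datapath
	 * still covers the filters, so the error is ignored.
	 */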
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (ht)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	tc_cls_bind_class(classid, cl, q, &n->res, base);
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if (n->val || n->mask) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
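		/* Fold the per-CPU hit counters into a single tc_u32_pcnt
		 * for the dump; gpf mirrors the layout of the per-CPU copies.
		 */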
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("u32");

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
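	/* tc_u_common state is shared by all u32 tp instances attached to
	 * the same qdisc/block; this global hash lets a new instance find
	 * an existing tc_u_common to attach to.
	 */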
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32);
module_exit(exit_u32);
MODULE_DESCRIPTION("Universal 32bit based TC Classifier");
MODULE_LICENSE("GPL");