// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed to hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough. And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
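/* Illustrative iproute2 usage (not part of this file; syntax per the
 * tc-u32(8) man page): a filter matching one destination network and
 * steering hits to class 1:1 would look like
 *
 *   tc filter add dev eth0 parent 1: prio 1 protocol ip u32 \
 *           match ip dst 10.0.0.0/8 flowid 1:1
 */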

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

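/* Worked example for u32_hash_fold() above: with sel->hmask set to
 * htonl(0x0000ff00), u32_change() computes fshift = ffs(ntohl(hmask)) - 1 = 8,
 * so a key of htonl(0x00003400) folds to (0x3400 & 0xff00) >> 8 = 0x34;
 * the caller then ANDs the result with the table divisor to pick a bucket.
 */
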
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int off;
	} stack[TC_U32_MAXDEPTH];
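	/* The stack records where we were in each enclosing hash table so the
	 * walk can backtrack ("POP" below) once a nested table is exhausted;
	 * TC_U32_MAXDEPTH bounds the descent so that a mis-programmed filter
	 * chain cannot loop forever (see the deadloop label).
	 */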

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

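		/* Compute where the next header starts: TC_U32_OFFSET adds a
		 * constant from sel.off, TC_U32_VAROFFSET additionally pulls a
		 * 16-bit field from the packet (e.g. the IPv4 header-length
		 * nibble), masks and shifts it, and the sum is rounded up to a
		 * multiple of 4; TC_U32_EAT applies the offset immediately
		 * instead of leaving it for the linked table.
		 */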
		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

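/* Handle layout, per the TC_U32_* macros in include/uapi/linux/pkt_cls.h:
 * bits 31-20 name the hash table (htid), bits 19-12 the bucket within it,
 * and bits 11-0 the key node; TC_U32_KEY() is the low 20 bits, which is why
 * a handle whose key part is zero denotes the table itself in u32_get()
 * above.  gen_new_htid() below therefore produces handles such as
 * (1 | 0x800) << 20 = 0x80100000 for the first table it allocates.
 */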
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;
	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

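	/* The second reference is held on behalf of tp->root itself; it is
	 * only dropped when the tcf_proto is destroyed, cf. the refcnt
	 * handling in u32_destroy().
	 */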
	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

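/* Example: within the table whose htid is 0x80100000, gen_new_kid() above
 * hands out node ids from 0x800 upward first (handle 0x80100800, then
 * 0x80100801, ...), and only falls back to the low range starting at
 * htid + 1 once 0x800-0xFFF are exhausted.
 */
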
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	size_t sel_size;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], ovr, extack);

		if (err) {
			u32_destroy_key(new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_destroy_key(new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

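	/* A request carrying TCA_U32_DIVISOR creates a new hash table rather
	 * than a key node.  For illustration (iproute2 syntax, per the
	 * tc-u32(8) man page), such a table would typically be created with:
	 *
	 *   tc filter add dev eth0 parent 1: prio 1 handle 1: \
	 *           protocol ip u32 divisor 256
	 */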
	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				    &cls_u32, cb_priv, &n->flags,
				    &n->in_hw_count);
	if (err)
		return err;

	return 0;
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/cls_u32.c Ugly (or Universal) 32bit key Packet Classifier.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 *
7 * The filters are packed to hash tables of key nodes
8 * with a set of 32bit key/mask pairs at every node.
9 * Nodes reference next level hash tables etc.
10 *
11 * This scheme is the best universal classifier I managed to
12 * invent; it is not super-fast, but it is not slow (provided you
13 * program it correctly), and general enough. And its relative
14 * speed grows as the number of rules becomes larger.
15 *
16 * It seems that it represents the best middle point between
17 * speed and manageability both by human and by machine.
18 *
19 * It is especially useful for link sharing combined with QoS;
20 * pure RSVP doesn't need such a general approach and can use
21 * much simpler (and faster) schemes, sort of cls_rsvp.c.
22 *
23 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
24 */
25
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/string.h>
31#include <linux/errno.h>
32#include <linux/percpu.h>
33#include <linux/rtnetlink.h>
34#include <linux/skbuff.h>
35#include <linux/bitmap.h>
36#include <linux/netdevice.h>
37#include <linux/hash.h>
38#include <net/netlink.h>
39#include <net/act_api.h>
40#include <net/pkt_cls.h>
41#include <linux/idr.h>
42#include <net/tc_wrapper.h>
43
44struct tc_u_knode {
45 struct tc_u_knode __rcu *next;
46 u32 handle;
47 struct tc_u_hnode __rcu *ht_up;
48 struct tcf_exts exts;
49 int ifindex;
50 u8 fshift;
51 struct tcf_result res;
52 struct tc_u_hnode __rcu *ht_down;
53#ifdef CONFIG_CLS_U32_PERF
54 struct tc_u32_pcnt __percpu *pf;
55#endif
56 u32 flags;
57 unsigned int in_hw_count;
58#ifdef CONFIG_CLS_U32_MARK
59 u32 val;
60 u32 mask;
61 u32 __percpu *pcpu_success;
62#endif
63 struct rcu_work rwork;
64 /* The 'sel' field MUST be the last field in structure to allow for
65 * tc_u32_keys allocated at end of structure.
66 */
67 struct tc_u32_sel sel;
68};
69
70struct tc_u_hnode {
71 struct tc_u_hnode __rcu *next;
72 u32 handle;
73 u32 prio;
74 refcount_t refcnt;
75 unsigned int divisor;
76 struct idr handle_idr;
77 bool is_root;
78 struct rcu_head rcu;
79 u32 flags;
80 /* The 'ht' field MUST be the last field in structure to allow for
81 * more entries allocated at end of structure.
82 */
83 struct tc_u_knode __rcu *ht[];
84};
85
86struct tc_u_common {
87 struct tc_u_hnode __rcu *hlist;
88 void *ptr;
89 refcount_t refcnt;
90 struct idr handle_idr;
91 struct hlist_node hnode;
92 long knodes;
93};
94
95static inline unsigned int u32_hash_fold(__be32 key,
96 const struct tc_u32_sel *sel,
97 u8 fshift)
98{
99 unsigned int h = ntohl(key & sel->hmask) >> fshift;
100
101 return h;
102}
103
104TC_INDIRECT_SCOPE int u32_classify(struct sk_buff *skb,
105 const struct tcf_proto *tp,
106 struct tcf_result *res)
107{
108 struct {
109 struct tc_u_knode *knode;
110 unsigned int off;
111 } stack[TC_U32_MAXDEPTH];
112
113 struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
114 unsigned int off = skb_network_offset(skb);
115 struct tc_u_knode *n;
116 int sdepth = 0;
117 int off2 = 0;
118 int sel = 0;
119#ifdef CONFIG_CLS_U32_PERF
120 int j;
121#endif
122 int i, r;
123
124next_ht:
125 n = rcu_dereference_bh(ht->ht[sel]);
126
127next_knode:
128 if (n) {
129 struct tc_u32_key *key = n->sel.keys;
130
131#ifdef CONFIG_CLS_U32_PERF
132 __this_cpu_inc(n->pf->rcnt);
133 j = 0;
134#endif
135
136 if (tc_skip_sw(n->flags)) {
137 n = rcu_dereference_bh(n->next);
138 goto next_knode;
139 }
140
141#ifdef CONFIG_CLS_U32_MARK
142 if ((skb->mark & n->mask) != n->val) {
143 n = rcu_dereference_bh(n->next);
144 goto next_knode;
145 } else {
146 __this_cpu_inc(*n->pcpu_success);
147 }
148#endif
149
150 for (i = n->sel.nkeys; i > 0; i--, key++) {
151 int toff = off + key->off + (off2 & key->offmask);
152 __be32 *data, hdata;
153
154 if (skb_headroom(skb) + toff > INT_MAX)
155 goto out;
156
157 data = skb_header_pointer(skb, toff, 4, &hdata);
158 if (!data)
159 goto out;
160 if ((*data ^ key->val) & key->mask) {
161 n = rcu_dereference_bh(n->next);
162 goto next_knode;
163 }
164#ifdef CONFIG_CLS_U32_PERF
165 __this_cpu_inc(n->pf->kcnts[j]);
166 j++;
167#endif
168 }
169
170 ht = rcu_dereference_bh(n->ht_down);
171 if (!ht) {
172check_terminal:
173 if (n->sel.flags & TC_U32_TERMINAL) {
174
175 *res = n->res;
176 if (!tcf_match_indev(skb, n->ifindex)) {
177 n = rcu_dereference_bh(n->next);
178 goto next_knode;
179 }
180#ifdef CONFIG_CLS_U32_PERF
181 __this_cpu_inc(n->pf->rhit);
182#endif
183 r = tcf_exts_exec(skb, &n->exts, res);
184 if (r < 0) {
185 n = rcu_dereference_bh(n->next);
186 goto next_knode;
187 }
188
189 return r;
190 }
191 n = rcu_dereference_bh(n->next);
192 goto next_knode;
193 }
194
195 /* PUSH */
196 if (sdepth >= TC_U32_MAXDEPTH)
197 goto deadloop;
198 stack[sdepth].knode = n;
199 stack[sdepth].off = off;
200 sdepth++;
201
202 ht = rcu_dereference_bh(n->ht_down);
203 sel = 0;
204 if (ht->divisor) {
205 __be32 *data, hdata;
206
207 data = skb_header_pointer(skb, off + n->sel.hoff, 4,
208 &hdata);
209 if (!data)
210 goto out;
211 sel = ht->divisor & u32_hash_fold(*data, &n->sel,
212 n->fshift);
213 }
214 if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
215 goto next_ht;
216
217 if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
218 off2 = n->sel.off + 3;
219 if (n->sel.flags & TC_U32_VAROFFSET) {
220 __be16 *data, hdata;
221
222 data = skb_header_pointer(skb,
223 off + n->sel.offoff,
224 2, &hdata);
225 if (!data)
226 goto out;
227 off2 += ntohs(n->sel.offmask & *data) >>
228 n->sel.offshift;
229 }
230 off2 &= ~3;
231 }
232 if (n->sel.flags & TC_U32_EAT) {
233 off += off2;
234 off2 = 0;
235 }
236
237 if (off < skb->len)
238 goto next_ht;
239 }
240
241 /* POP */
242 if (sdepth--) {
243 n = stack[sdepth].knode;
244 ht = rcu_dereference_bh(n->ht_up);
245 off = stack[sdepth].off;
246 goto check_terminal;
247 }
248out:
249 return -1;
250
251deadloop:
252 net_warn_ratelimited("cls_u32: dead loop\n");
253 return -1;
254}
255
256static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
257{
258 struct tc_u_hnode *ht;
259
260 for (ht = rtnl_dereference(tp_c->hlist);
261 ht;
262 ht = rtnl_dereference(ht->next))
263 if (ht->handle == handle)
264 break;
265
266 return ht;
267}
268
269static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
270{
271 unsigned int sel;
272 struct tc_u_knode *n = NULL;
273
274 sel = TC_U32_HASH(handle);
275 if (sel > ht->divisor)
276 goto out;
277
278 for (n = rtnl_dereference(ht->ht[sel]);
279 n;
280 n = rtnl_dereference(n->next))
281 if (n->handle == handle)
282 break;
283out:
284 return n;
285}
286
287
288static void *u32_get(struct tcf_proto *tp, u32 handle)
289{
290 struct tc_u_hnode *ht;
291 struct tc_u_common *tp_c = tp->data;
292
293 if (TC_U32_HTID(handle) == TC_U32_ROOT)
294 ht = rtnl_dereference(tp->root);
295 else
296 ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
297
298 if (!ht)
299 return NULL;
300
301 if (TC_U32_KEY(handle) == 0)
302 return ht;
303
304 return u32_lookup_key(ht, handle);
305}
306
307/* Protected by rtnl lock */
308static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
309{
310 int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
311 if (id < 0)
312 return 0;
313 return (id | 0x800U) << 20;
314}
315
316static struct hlist_head *tc_u_common_hash;
317
318#define U32_HASH_SHIFT 10
319#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
320
321static void *tc_u_common_ptr(const struct tcf_proto *tp)
322{
323 struct tcf_block *block = tp->chain->block;
324
325 /* The block sharing is currently supported only
326 * for classless qdiscs. In that case we use block
327 * for tc_u_common identification. In case the
328 * block is not shared, block->q is a valid pointer
329 * and we can use that. That works for classful qdiscs.
330 */
331 if (tcf_block_shared(block))
332 return block;
333 else
334 return block->q;
335}
336
337static struct hlist_head *tc_u_hash(void *key)
338{
339 return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
340}
341
342static struct tc_u_common *tc_u_common_find(void *key)
343{
344 struct tc_u_common *tc;
345 hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
346 if (tc->ptr == key)
347 return tc;
348 }
349 return NULL;
350}
351
352static int u32_init(struct tcf_proto *tp)
353{
354 struct tc_u_hnode *root_ht;
355 void *key = tc_u_common_ptr(tp);
356 struct tc_u_common *tp_c = tc_u_common_find(key);
357
358 root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
359 if (root_ht == NULL)
360 return -ENOBUFS;
361
362 refcount_set(&root_ht->refcnt, 1);
363 root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
364 root_ht->prio = tp->prio;
365 root_ht->is_root = true;
366 idr_init(&root_ht->handle_idr);
367
368 if (tp_c == NULL) {
369 tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
370 if (tp_c == NULL) {
371 kfree(root_ht);
372 return -ENOBUFS;
373 }
374 refcount_set(&tp_c->refcnt, 1);
375 tp_c->ptr = key;
376 INIT_HLIST_NODE(&tp_c->hnode);
377 idr_init(&tp_c->handle_idr);
378
379 hlist_add_head(&tp_c->hnode, tc_u_hash(key));
380 } else {
381 refcount_inc(&tp_c->refcnt);
382 }
383
384 RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
385 rcu_assign_pointer(tp_c->hlist, root_ht);
386
387 /* root_ht must be destroyed when tcf_proto is destroyed */
388 rcu_assign_pointer(tp->root, root_ht);
389 tp->data = tp_c;
390 return 0;
391}
392
393static void __u32_destroy_key(struct tc_u_knode *n)
394{
395 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
396
397 tcf_exts_destroy(&n->exts);
398 if (ht && refcount_dec_and_test(&ht->refcnt))
399 kfree(ht);
400 kfree(n);
401}
402
403static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
404{
405 tcf_exts_put_net(&n->exts);
406#ifdef CONFIG_CLS_U32_PERF
407 if (free_pf)
408 free_percpu(n->pf);
409#endif
410#ifdef CONFIG_CLS_U32_MARK
411 if (free_pf)
412 free_percpu(n->pcpu_success);
413#endif
414 __u32_destroy_key(n);
415}
416
417/* u32_delete_key_rcu should be called when free'ing a copied
418 * version of a tc_u_knode obtained from u32_init_knode(). When
419 * copies are obtained from u32_init_knode() the statistics are
420 * shared between the old and new copies to allow readers to
421 * continue to update the statistics during the copy. To support
422 * this the u32_delete_key_rcu variant does not free the percpu
423 * statistics.
424 */
425static void u32_delete_key_work(struct work_struct *work)
426{
427 struct tc_u_knode *key = container_of(to_rcu_work(work),
428 struct tc_u_knode,
429 rwork);
430 rtnl_lock();
431 u32_destroy_key(key, false);
432 rtnl_unlock();
433}
434
435/* u32_delete_key_freepf_rcu is the rcu callback variant
436 * that free's the entire structure including the statistics
437 * percpu variables. Only use this if the key is not a copy
438 * returned by u32_init_knode(). See u32_delete_key_rcu()
439 * for the variant that should be used with keys return from
440 * u32_init_knode()
441 */
442static void u32_delete_key_freepf_work(struct work_struct *work)
443{
444 struct tc_u_knode *key = container_of(to_rcu_work(work),
445 struct tc_u_knode,
446 rwork);
447 rtnl_lock();
448 u32_destroy_key(key, true);
449 rtnl_unlock();
450}
451
452static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
453{
454 struct tc_u_common *tp_c = tp->data;
455 struct tc_u_knode __rcu **kp;
456 struct tc_u_knode *pkp;
457 struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
458
459 if (ht) {
460 kp = &ht->ht[TC_U32_HASH(key->handle)];
461 for (pkp = rtnl_dereference(*kp); pkp;
462 kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
463 if (pkp == key) {
464 RCU_INIT_POINTER(*kp, key->next);
465 tp_c->knodes--;
466
467 tcf_unbind_filter(tp, &key->res);
468 idr_remove(&ht->handle_idr, key->handle);
469 tcf_exts_get_net(&key->exts);
470 tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
471 return 0;
472 }
473 }
474 }
475 WARN_ON(1);
476 return 0;
477}
478
479static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
480 struct netlink_ext_ack *extack)
481{
482 struct tcf_block *block = tp->chain->block;
483 struct tc_cls_u32_offload cls_u32 = {};
484
485 tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
486 cls_u32.command = TC_CLSU32_DELETE_HNODE;
487 cls_u32.hnode.divisor = h->divisor;
488 cls_u32.hnode.handle = h->handle;
489 cls_u32.hnode.prio = h->prio;
490
491 tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
492}
493
494static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
495 u32 flags, struct netlink_ext_ack *extack)
496{
497 struct tcf_block *block = tp->chain->block;
498 struct tc_cls_u32_offload cls_u32 = {};
499 bool skip_sw = tc_skip_sw(flags);
500 bool offloaded = false;
501 int err;
502
503 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
504 cls_u32.command = TC_CLSU32_NEW_HNODE;
505 cls_u32.hnode.divisor = h->divisor;
506 cls_u32.hnode.handle = h->handle;
507 cls_u32.hnode.prio = h->prio;
508
509 err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
510 if (err < 0) {
511 u32_clear_hw_hnode(tp, h, NULL);
512 return err;
513 } else if (err > 0) {
514 offloaded = true;
515 }
516
517 if (skip_sw && !offloaded)
518 return -EINVAL;
519
520 return 0;
521}
522
523static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
524 struct netlink_ext_ack *extack)
525{
526 struct tcf_block *block = tp->chain->block;
527 struct tc_cls_u32_offload cls_u32 = {};
528
529 tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
530 cls_u32.command = TC_CLSU32_DELETE_KNODE;
531 cls_u32.knode.handle = n->handle;
532
533 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
534 &n->flags, &n->in_hw_count, true);
535}
536
537static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
538 u32 flags, struct netlink_ext_ack *extack)
539{
540 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
541 struct tcf_block *block = tp->chain->block;
542 struct tc_cls_u32_offload cls_u32 = {};
543 bool skip_sw = tc_skip_sw(flags);
544 int err;
545
546 tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
547 cls_u32.command = TC_CLSU32_REPLACE_KNODE;
548 cls_u32.knode.handle = n->handle;
549 cls_u32.knode.fshift = n->fshift;
550#ifdef CONFIG_CLS_U32_MARK
551 cls_u32.knode.val = n->val;
552 cls_u32.knode.mask = n->mask;
553#else
554 cls_u32.knode.val = 0;
555 cls_u32.knode.mask = 0;
556#endif
557 cls_u32.knode.sel = &n->sel;
558 cls_u32.knode.res = &n->res;
559 cls_u32.knode.exts = &n->exts;
560 if (n->ht_down)
561 cls_u32.knode.link_handle = ht->handle;
562
563 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
564 &n->flags, &n->in_hw_count, true);
565 if (err) {
566 u32_remove_hw_knode(tp, n, NULL);
567 return err;
568 }
569
570 if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
571 return -EINVAL;
572
573 return 0;
574}
575
576static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
577 struct netlink_ext_ack *extack)
578{
579 struct tc_u_common *tp_c = tp->data;
580 struct tc_u_knode *n;
581 unsigned int h;
582
583 for (h = 0; h <= ht->divisor; h++) {
584 while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
585 RCU_INIT_POINTER(ht->ht[h],
586 rtnl_dereference(n->next));
587 tp_c->knodes--;
588 tcf_unbind_filter(tp, &n->res);
589 u32_remove_hw_knode(tp, n, extack);
590 idr_remove(&ht->handle_idr, n->handle);
591 if (tcf_exts_get_net(&n->exts))
592 tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
593 else
594 u32_destroy_key(n, true);
595 }
596 }
597}
598
599static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
600 struct netlink_ext_ack *extack)
601{
602 struct tc_u_common *tp_c = tp->data;
603 struct tc_u_hnode __rcu **hn;
604 struct tc_u_hnode *phn;
605
606 u32_clear_hnode(tp, ht, extack);
607
608 hn = &tp_c->hlist;
609 for (phn = rtnl_dereference(*hn);
610 phn;
611 hn = &phn->next, phn = rtnl_dereference(*hn)) {
612 if (phn == ht) {
613 u32_clear_hw_hnode(tp, ht, extack);
614 idr_destroy(&ht->handle_idr);
615 idr_remove(&tp_c->handle_idr, ht->handle);
616 RCU_INIT_POINTER(*hn, ht->next);
617 kfree_rcu(ht, rcu);
618 return 0;
619 }
620 }
621
622 return -ENOENT;
623}
624
625static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
626 struct netlink_ext_ack *extack)
627{
628 struct tc_u_common *tp_c = tp->data;
629 struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
630
631 WARN_ON(root_ht == NULL);
632
633 if (root_ht && refcount_dec_and_test(&root_ht->refcnt))
634 u32_destroy_hnode(tp, root_ht, extack);
635
636 if (refcount_dec_and_test(&tp_c->refcnt)) {
637 struct tc_u_hnode *ht;
638
639 hlist_del(&tp_c->hnode);
640
641 while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
642 u32_clear_hnode(tp, ht, extack);
643 RCU_INIT_POINTER(tp_c->hlist, ht->next);
644
645 /* u32_destroy_key() will later free ht for us, if it's
646 * still referenced by some knode
647 */
648 if (refcount_dec_and_test(&ht->refcnt))
649 kfree_rcu(ht, rcu);
650 }
651
652 idr_destroy(&tp_c->handle_idr);
653 kfree(tp_c);
654 }
655
656 tp->data = NULL;
657}
658
659static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
660 bool rtnl_held, struct netlink_ext_ack *extack)
661{
662 struct tc_u_hnode *ht = arg;
663 struct tc_u_common *tp_c = tp->data;
664 int ret = 0;
665
666 if (TC_U32_KEY(ht->handle)) {
667 u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
668 ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
669 goto out;
670 }
671
672 if (ht->is_root) {
673 NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
674 return -EINVAL;
675 }
676
677 if (refcount_dec_if_one(&ht->refcnt)) {
678 u32_destroy_hnode(tp, ht, extack);
679 } else {
680 NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
681 return -EBUSY;
682 }
683
684out:
685 *last = refcount_read(&tp_c->refcnt) == 1 && tp_c->knodes == 0;
686 return ret;
687}
688
689static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
690{
691 u32 index = htid | 0x800;
692 u32 max = htid | 0xFFF;
693
694 if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
695 index = htid + 1;
696 if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
697 GFP_KERNEL))
698 index = max;
699 }
700
701 return index;
702}
703
704static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
705 [TCA_U32_CLASSID] = { .type = NLA_U32 },
706 [TCA_U32_HASH] = { .type = NLA_U32 },
707 [TCA_U32_LINK] = { .type = NLA_U32 },
708 [TCA_U32_DIVISOR] = { .type = NLA_U32 },
709 [TCA_U32_SEL] = { .len = sizeof(struct tc_u32_sel) },
710 [TCA_U32_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ },
711 [TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
712 [TCA_U32_FLAGS] = { .type = NLA_U32 },
713};

static void u32_unbind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			      struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID])
		tcf_unbind_filter(tp, &n->res);
}

static void u32_bind_filter(struct tcf_proto *tp, struct tc_u_knode *n,
			    unsigned long base, struct nlattr **tb)
{
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}
}

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, u32 flags, u32 fl_flags,
			 struct netlink_ext_ack *extack)
{
	int err, ifindex = -1;

	err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_INDEV]) {
		ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ifindex < 0)
			return -EINVAL;
	}

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			refcount_inc(&ht_down->refcnt);
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			refcount_dec(&ht_old->refcnt);
	}

	if (ifindex >= 0)
		n->ifindex = ifindex;

	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}
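
/* Editorial sketch of the RCU replacement idiom used above, assuming a
 * singly linked list whose 'next' pointers are RCU-protected:
 *
 *	RCU_INIT_POINTER(new->next, old->next); // 'new' is not yet visible,
 *	                                        // so no barrier is needed
 *	rcu_assign_pointer(*slot, new);         // publish 'new' to readers
 *	kfree_rcu(old, rcu);                    // reclaim after grace period
 *
 * Readers traversing under rcu_read_lock() see either the old node or the
 * new one, never a partially initialized node.
 */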

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly, success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		refcount_inc(&ht->refcnt);

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, u32 flags,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, userflags = 0;
	size_t sel_size;
	int err;

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(userflags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ userflags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, new, tb, tca[TCA_RATE],
				    flags, new->flags, extack);

		if (err) {
			__u32_destroy_key(new);
			return err;
		}

		u32_bind_filter(tp, new, base, tb);

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_unbind_filter(tp, new, tb);

			if (tb[TCA_U32_LINK]) {
				struct tc_u_hnode *ht_old;

				ht_old = rtnl_dereference(n->ht_down);
				if (ht_old)
					refcount_inc(&ht_old->refcnt);
			}
			__u32_destroy_key(new);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
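		/* Editorial note: the post-decrement above leaves 'divisor'
		 * holding the user-supplied value minus one.  A request for
		 * 256 buckets therefore stores 255 in ht->divisor, and the
		 * allocation below is sized with struct_size() for
		 * divisor + 1 = 256 bucket slots; at classification time
		 * the u32_hash_fold() result is masked with ht->divisor.
		 */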
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		refcount_set(&ht->refcnt, 1);
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = userflags;

		err = u32_replace_hw_hnode(tp, ht, userflags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	/* At this point we need to derive the new handle that will uniquely
	 * identify this table match entry. The identity is a 32-bit value
	 * laid out as htid(12b):bucketid(8b):node/entryid(12b).
	 *
	 * We already have the table (ht) in which the entry will be
	 * inserted, and we carry the table's id in the variable "htid".
	 * Note that earlier code selected the ht either a) from the htid the
	 * user supplied via the TCA_U32_HASH attribute, or b) when no such
	 * attribute was passed, by defaulting to the root ht at ID
	 * 0x[800][00][000]. Rule: the root table has a single bucket with
	 * ID 0. If OTOH the user passed us the htid, they may also pass a
	 * bucketid of choice; 0 is fine. For example, a user htid of
	 * 0x[600][01][000] indicates hash bucketid 1. Rule: the entry/node
	 * ID _cannot_ be passed via the htid, so even if it is non-zero it
	 * will be ignored.
	 *
	 * We may also have a handle, if the user passed one. The handle
	 * carries the same htid(12b):bucketid(8b):node/entryid(12b) layout.
	 * Rule: the bucketid on the handle is ignored even if one was
	 * passed; rather, the value in "htid" is always assumed to be the
	 * bucketid.
	 */
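	/* Illustrative decomposition (editorial note), using the TC_U32_*
	 * macros from <linux/pkt_cls.h> on a sample handle:
	 *
	 *	u32 h = 0x60001123;
	 *	u32 htid = TC_U32_HTID(h);	// 0x60000000: hash table id
	 *	u32 bkt  = TC_U32_HASH(h);	// 0x01: bucket within the table
	 *	u32 node = TC_U32_NODE(h);	// 0x123: entry within the bucket
	 */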
	if (handle) {
		/* Rule: the htid from the handle and the table id in "htid"
		 * must match.
		 */
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		/* Ok, so far we have a valid htid(12b):bucketid(8b) but we
		 * need to finalize the table entry identification with the
		 * last part - the node/entryid(12b). Rule: the nodeid
		 * _cannot be 0_ for entries; a nodeid of 0 is reserved for
		 * tables (see the earlier code which processes the
		 * TCA_U32_DIVISOR attribute).
		 * Rule: the nodeid can only be derived from the handle (and
		 * not from the htid).
		 * Rule: if the handle specifies a zero nodeid, for example
		 * 0x60000000, then pick a new nodeid from the pool of IDs
		 * this hash table has been allocating from.
		 * If OTOH one is specified (for example the user passed a
		 * handle such as 0x60000123), then we use it to generate the
		 * final handle that uniquely identifies the match entry.
		 */
		if (!TC_U32_NODE(handle)) {
			handle = gen_new_kid(ht, htid);
		} else {
			handle = htid | TC_U32_NODE(handle);
			err = idr_alloc_u32(&ht->handle_idr, NULL, &handle,
					    handle, GFP_KERNEL);
			if (err)
				return err;
		}
	} else {
		/* The user did not give us a handle; let's just generate one
		 * from the table's pool of nodeids.
		 */
		handle = gen_new_kid(ht, htid);
	}

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}
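	/* Editorial note: struct_size(s, keys, s->nkeys) evaluates to
	 * sizeof(struct tc_u32_sel) + s->nkeys * sizeof(struct tc_u32_key),
	 * with overflow checking, so the comparison above rejects a netlink
	 * payload too short for the number of keys the selector claims.
	 */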

	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
			       __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	unsafe_memcpy(&n->sel, s, sel_size,
		      /* A composite flex-array structure destination,
		       * which was correctly sized with struct_size(),
		       * bounds-checked against nla_len(), and allocated
		       * above. */);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
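	/* Editorial example for the fshift above: with s->hmask ==
	 * htonl(0x0000ff00), ffs(0x0000ff00) == 9, so fshift == 8 and
	 * u32_hash_fold() computes ntohl(key & hmask) >> 8, right-aligning
	 * the masked byte for use as a bucket index.
	 */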
	n->flags = userflags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, n, tb, tca[TCA_RATE],
			    flags, n->flags, extack);

	u32_bind_filter(tp, n, base, tb);

	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errunbind;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errunbind:
	u32_unbind_filter(tp, n, tb);

#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}
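
/* Editorial usage sketch (iproute2 syntax, abbreviated): a typical
 * sequence exercising the paths above first creates a hash table and
 * then inserts a keyed entry into one of its buckets, e.g.
 *
 *	tc filter add dev eth0 parent 1:0 prio 5 handle 2: \
 *		protocol ip u32 divisor 256
 *	tc filter add dev eth0 parent 1:0 prio 5 protocol ip \
 *		u32 ht 2:0: match ip src 10.0.0.0/8 flowid 1:1
 *
 * The first command takes the TCA_U32_DIVISOR branch above; the second
 * derives its handle from the "ht 2:0:" addressing as described in the
 * comments above.
 */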

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		if (!tc_cls_stats_dump(tp, arg, ht))
			return;

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (!tc_cls_stats_dump(tp, arg, n))
					return;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				     &cls_u32, cb_priv, &n->flags,
				     &n->in_hw_count);
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			   unsigned long base)
{
	struct tc_u_knode *n = fh;

	tc_cls_bind_class(classid, cl, q, &n->res, base);
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
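
/* Editorial note on TCA_U32_PCNT above: struct tc_u32_pcnt (from the
 * pkt_cls.h uapi) carries rcnt (times the node was examined), rhit
 * (times it matched) and one kcnts[] slot per selector key.  The dump
 * flattens the per-cpu counters into a single heap-allocated copy
 * because netlink needs a contiguous buffer.
 */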

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.reoffload	= u32_reoffload,
	.dump		= u32_dump,
	.bind_class	= u32_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_DESCRIPTION("Universal 32bit based TC Classifier");
MODULE_LICENSE("GPL");