// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
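
/*
 * Rough usage sketch (assumes a standard iproute2; exact keywords may
 * differ between versions).  The classifier matches on route realms,
 * so NET_CLS_ROUTE4 selects CONFIG_IP_ROUTE_CLASSID:
 *
 *	ip route add 10.0.0.0/24 dev eth0 realm 10
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 10 classid 1:10
 *
 * Filters live in a two-level table: head->table[] is indexed by the
 * TO (destination) realm, with bucket 256 holding "to ANY", and each
 * bucket has 33 chains: 16 hashed by FROM realm, 16 hashed by
 * incoming interface, plus one wildcard chain.  A small direct-mapped
 * fastmap caches recent (id, iif) lookups, including failures; note
 * that its hash uses only the low bits of the route tag, not the iif.
 */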
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32				id;
	int				iif;

	struct tcf_result		res;
	struct tcf_exts			exts;
	u32				handle;
	struct route4_bucket		*bkt;
	struct tcf_proto		*tp;
	struct rcu_work			rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

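/*
 * Expanded inside the classify loops below: copy the matched result,
 * run any attached actions, and return.  Only action-less matches are
 * cached in the fastmap; an action returning an error sets dont_cache
 * and falls through to the next filter.
 */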
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

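/*
 * Lookup order: the fastmap cache first; then, within the bucket
 * selected by the TO realm, the exact-FROM chain, the IIF chain and
 * the wildcard chain; finally the same three chains in the "to ANY"
 * bucket (256).  A complete miss is cached as ROUTE4_FAILURE so that
 * repeated misses stay cheap.
 */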
TC_INDIRECT_SCOPE int route4_classify(struct sk_buff *skb,
				      const struct tcf_proto *tp,
				      struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

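/*
 * A filter handle packs the match into 32 bits: the low byte is the
 * TO realm (bit 15 set means "to ANY", bits 8-14 carry a user-chosen
 * order), and the high 16 bits hold the FROM realm, the iif with
 * 0x8000 set, or 0xFFFF for "from ANY".  to_hash()/from_hash() decode
 * a handle back into table and chain indices.
 */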
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

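/* Decode a handle into (bucket, chain) and walk that one chain. */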
static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

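/*
 * Tear down all buckets and chains under RTNL.  Filters whose exts
 * still hold a netns reference are freed from deferred RCU work; the
 * rest are freed synchronously.
 */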
static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

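/*
 * Unlink a single filter under RTNL, invalidate the fastmap, free the
 * bucket once its last chain empties, and report via *last whether
 * the classifier as a whole is now empty.
 */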
static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap entries that might still
			 * reference this filter; since it was unlinked
			 * above, it cannot get back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

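/*
 * Validate the netlink attributes, derive the canonical handle from
 * TO/FROM/IIF (see the encoding described above to_hash()), create
 * the target bucket on demand and fill in the filter.  An explicit
 * handle given on creation must agree with the derived one except for
 * the order bits (0x7F00).
 */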
static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    u32 flags, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	if (!nhandle) {
		NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid");
		return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

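/*
 * Create or replace a filter.  A replacement is built as a fresh
 * filter seeded from the old one and inserted into its chain in
 * handle order; only then is the old filter unlinked and queued for
 * deferred freeing.  The fastmap is reset so no stale entry survives.
 */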
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (!handle) {
		NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid");
		return -EINVAL;
	}

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
					  route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, flags, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (!tc_cls_stats_dump(tp, arg, f))
						return;
				}
			}
		}
	}
}

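/*
 * Dumping reverses the handle encoding: TO is emitted unless bit 15
 * is set, and the upper half is emitted as IIF or FROM depending on
 * bit 31 of the handle.
 */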
static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");