/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

/*
   Compared to the general packet classification problem,
   RSVP needs only several relatively simple rules:

   * (dst, protocol) are always specified,
     so that we are able to hash them.
   * src may be exact, or may be wildcard, so that
     we can keep a hash table plus one wildcard entry.
   * source port (or flow label) is important only if src is given.

   IMPLEMENTATION.

   We use a two level hash table: the top level is keyed by
   destination address and protocol ID; every bucket contains a list
   of "rsvp sessions", identified by destination address, protocol and
   DPI (= "Destination Port ID"): triple (key, mask, offset).

   Every session has a smaller hash table keyed by source address
   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
   Every bucket is again a list of "RSVP flows", selected by
   source address and SPI (= "Source Port ID" here, rather than
   "security parameter index"): triple (key, mask, offset).


   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
   and all fragmented packets go to the best-effort traffic class.


   NOTE 2. Two "port id"s seem to be redundant; RFC 2207 requires
   only one "Generalized Port Identifier".  So for classic
   AH, ESP (and UDP, TCP) both *pi values should coincide, or one of them
   should be a wildcard.

   At first sight, this redundancy is just a waste of CPU
   resources.  But DPI and SPI add the possibility to assign different
   priorities to GPIs.  See also note 4 about tunnels below.


   NOTE 3. One complication is the case of tunneled packets.
   We implement it as follows: if the first lookup
   matches a special session with a non-zero "tunnelhdr" value,
   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
   In this case, we pull tunnelhdr bytes and restart the lookup
   with the tunnel ID added to the list of keys.  Simple and stupid 8)8)
   It's enough for PIMREG and IPIP.


   NOTE 4. Two GPIs make it possible to parse even GRE packets.
   E.g. DPI can select ETH_P_IP (and the necessary flags to make
   tunnelhdr correct) in the GRE protocol field, while SPI matches the
   GRE key.  Is it not nice? 8)8)


   Well, as a result, despite its simplicity, we get a pretty
   powerful classification engine.  */
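
/* An illustrative walk-through of the structures above (a sketch only; the
 * concrete addresses, ports and hash values are made up).  For a plain,
 * non-tunneled IPv4/UDP reservation to dst 239.1.1.1 port 5001 from
 * src 10.0.0.2 port 5000:
 *
 *   1. hash_dst(dst, IPPROTO_UDP, 0) picks one of the 256 top-level buckets;
 *      the rsvp_session list there is scanned for an entry whose dst,
 *      protocol and tunnelid match and whose DPI (key, mask, offset) matches
 *      the 32-bit word found dpi.offset bytes into the transport header,
 *      i.e. the word holding the UDP destination port.
 *   2. hash_src(src) picks one of the session's 16 source buckets (slot 16
 *      is the wildcard bucket, tried last); the rsvp_filter list there is
 *      scanned the same way, with src and SPI matching the UDP source port.
 *   3. The matching rsvp_filter supplies the tcf_result (class id) and the
 *      extensions (actions/policing) applied to the packet.
 */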


struct rsvp_head {
	u32			tmap[256/32];
	u32			hgenerator;
	u8			tgenerator;
	struct rsvp_session __rcu *ht[256];
	struct rcu_head		rcu;
};

struct rsvp_session {
	struct rsvp_session __rcu	*next;
	__be32				dst[RSVP_DST_LEN];
	struct tc_rsvp_gpi		dpi;
	u8				protocol;
	u8				tunnelid;
	/* 16 (src,sport) hash slots, and one wildcard source slot */
	struct rsvp_filter __rcu	*ht[16 + 1];
	struct rcu_head			rcu;
};


struct rsvp_filter {
	struct rsvp_filter __rcu	*next;
	__be32				src[RSVP_DST_LEN];
	struct tc_rsvp_gpi		spi;
	u8				tunnelhdr;

	struct tcf_result		res;
	struct tcf_exts			exts;

	u32				handle;
	struct rsvp_session		*sess;
	struct rcu_work			rwork;
};

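/* Both hash functions below fold the last 32 bits of an address (the whole
 * address for IPv4, the last word for IPv6) into a small index: hash_dst()
 * also mixes in the protocol and tunnel ID and yields the 8-bit top-level
 * bucket, hash_src() yields the 4-bit per-session source bucket.
 */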
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];

	h ^= h>>16;
	h ^= h>>8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}

static inline unsigned int hash_src(__be32 *src)
{
	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];

	h ^= h>>16;
	h ^= h>>8;
	h ^= h>>4;
	return h & 0xF;
}

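/* Run the extensions (actions/policing) of the filter "f" just matched by
 * the enclosing loop in rsvp_classify(): a negative result means "keep
 * looking" (continue the loop), a positive result is returned to the caller,
 * and zero falls through to accept the match.
 */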
#define RSVP_APPLY_RESULT()				\
{							\
	int r = tcf_exts_exec(skb, &f->exts, res);	\
	if (r < 0)					\
		continue;				\
	else if (r > 0)					\
		return r;				\
}

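/* The main lookup: hash the destination and protocol to a session bucket,
 * scan for a matching session (dst, protocol, tunnelid, DPI), then scan the
 * source bucket (and finally the wildcard bucket) for a matching filter.
 * If the matched filter describes a tunnel (tunnelhdr != 0), the flow id
 * carries the tunnel ID instead; skip f->tunnelhdr bytes and restart the
 * lookup with that tunnel ID mixed into the keys, as described in NOTE 3
 * above.
 */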
static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct rsvp_head *head = rcu_dereference_bh(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1, h2;
	__be32 *dst, *src;
	u8 protocol;
	u8 tunnelid = 0;
	u8 *xprt;
#if RSVP_DST_LEN == 4
	struct ipv6hdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ipv6_hdr(skb);
#else
	struct iphdr *nhptr;

	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
		return -1;
	nhptr = ip_hdr(skb);
#endif
restart:

#if RSVP_DST_LEN == 4
	src = &nhptr->saddr.s6_addr32[0];
	dst = &nhptr->daddr.s6_addr32[0];
	protocol = nhptr->nexthdr;
	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
	src = &nhptr->saddr;
	dst = &nhptr->daddr;
	protocol = nhptr->protocol;
	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
	if (ip_is_fragment(nhptr))
		return -1;
#endif

	h1 = hash_dst(dst, protocol, tunnelid);
	h2 = hash_src(src);

	for (s = rcu_dereference_bh(head->ht[h1]); s;
	     s = rcu_dereference_bh(s->next)) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
		    protocol == s->protocol &&
		    !(s->dpi.mask &
		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    tunnelid == s->tunnelid) {

			for (f = rcu_dereference_bh(s->ht[h2]); f;
			     f = rcu_dereference_bh(f->next)) {
				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
				    &&
				    src[0] == f->src[0] &&
				    src[1] == f->src[1] &&
				    src[2] == f->src[2]
#endif
				    ) {
					*res = f->res;
					RSVP_APPLY_RESULT();

matched:
					if (f->tunnelhdr == 0)
						return 0;

					tunnelid = f->res.classid;
					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
					goto restart;
				}
			}

			/* And wildcard bucket... */
			for (f = rcu_dereference_bh(s->ht[16]); f;
			     f = rcu_dereference_bh(f->next)) {
				*res = f->res;
				RSVP_APPLY_RESULT();
				goto matched;
			}
			return -1;
		}
	}
	return -1;
}

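/* Replace the filter with handle @h by the updated copy @n: find the old
 * node in its source bucket and swap the list pointer with an RCU publish,
 * so concurrent readers in rsvp_classify() see either the old or the new
 * filter, never a half-updated one.
 */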
static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter __rcu **ins;
	struct rsvp_filter *pins;
	unsigned int h1 = h & 0xFF;
	unsigned int h2 = (h >> 8) & 0xFF;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
			if (pins->handle == h) {
				RCU_INIT_POINTER(n->next, pins->next);
				rcu_assign_pointer(*ins, n);
				return;
			}
		}
	}

	/* Something went wrong if we are trying to replace a non-existent
	 * node.  Might as well halt instead of silently failing.
	 */
	BUG_ON(1);
}

static void *rsvp_get(struct tcf_proto *tp, u32 handle)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_session *s;
	struct rsvp_filter *f;
	unsigned int h1 = handle & 0xFF;
	unsigned int h2 = (handle >> 8) & 0xFF;

	if (h2 > 16)
		return NULL;

	for (s = rtnl_dereference(head->ht[h1]); s;
	     s = rtnl_dereference(s->next)) {
		for (f = rtnl_dereference(s->ht[h2]); f;
		     f = rtnl_dereference(f->next)) {
			if (f->handle == handle)
				return f;
		}
	}
	return NULL;
}

static int rsvp_init(struct tcf_proto *tp)
{
	struct rsvp_head *data;

	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
	if (data) {
		rcu_assign_pointer(tp->root, data);
		return 0;
	}
	return -ENOBUFS;
}

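/* Filter teardown happens in two steps: __rsvp_delete_filter() releases the
 * extensions and frees the node, while rsvp_delete_filter() below unbinds
 * the class and, when the extensions hold a reference on the namespace,
 * defers the free to a workqueue so it runs after an RCU grace period.
 */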
static void __rsvp_delete_filter(struct rsvp_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void rsvp_delete_filter_work(struct work_struct *work)
{
	struct rsvp_filter *f = container_of(to_rcu_work(work),
					     struct rsvp_filter,
					     rwork);
	rtnl_lock();
	__rsvp_delete_filter(f);
	rtnl_unlock();
}

static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
	else
		__rsvp_delete_filter(f);
}

static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int h1, h2;

	if (data == NULL)
		return;

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
			RCU_INIT_POINTER(data->ht[h1], s->next);

			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
					rcu_assign_pointer(s->ht[h2], f->next);
					rsvp_delete_filter(tp, f);
				}
			}
			kfree_rcu(s, rcu);
		}
	}
	kfree_rcu(data, rcu);
}

static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	struct rsvp_filter *nfp, *f = arg;
	struct rsvp_filter __rcu **fp;
	unsigned int h = f->handle;
	struct rsvp_session __rcu **sp;
	struct rsvp_session *nsp, *s = f->sess;
	int i, h1;

	fp = &s->ht[(h >> 8) & 0xFF];
	for (nfp = rtnl_dereference(*fp); nfp;
	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
		if (nfp == f) {
			RCU_INIT_POINTER(*fp, f->next);
			rsvp_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 16; i++)
				if (s->ht[i])
					goto out;

			/* OK, session has no flows */
			sp = &head->ht[h & 0xFF];
			for (nsp = rtnl_dereference(*sp); nsp;
			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
				if (nsp == s) {
					RCU_INIT_POINTER(*sp, s->next);
					kfree_rcu(s, rcu);
					goto out;
				}
			}

			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 < 256; h1++) {
		if (rcu_access_pointer(head->ht[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

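/* Filter handles encode where the filter lives:
 *
 *	bits  0.. 7	h1, the destination/protocol bucket
 *	bits  8..15	h2, the source bucket (16 means the wildcard slot)
 *	bits 16..31	a per-head generator making the handle unique
 *
 * With hypothetical values h1 = 0x7f, h2 = 0x05 and generator word 0x0003,
 * the handle is 0x0003057f; rsvp_get() and rsvp_delete() recover the buckets
 * with "handle & 0xFF" and "(handle >> 8) & 0xFF".
 */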
static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	int i = 0xFFFF;

	while (i-- > 0) {
		u32 h;

		if ((data->hgenerator += 0x10000) == 0)
			data->hgenerator = 0x10000;
		h = data->hgenerator|salt;
		if (!rsvp_get(tp, h))
			return h;
	}
	return 0;
}

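/* Tunnel IDs (1..255) are tracked in the head's tmap bitmap.  tunnel_bts()
 * test-and-sets the bit for the current tgenerator value; tunnel_recycle()
 * rebuilds the bitmap from the filters that still reference a tunnel, and
 * gen_tunnel() hands out the next free ID.
 */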
static int tunnel_bts(struct rsvp_head *data)
{
	int n = data->tgenerator >> 5;
	u32 b = 1 << (data->tgenerator & 0x1F);

	if (data->tmap[n] & b)
		return 0;
	data->tmap[n] |= b;
	return 1;
}

static void tunnel_recycle(struct rsvp_head *data)
{
	struct rsvp_session __rcu **sht = data->ht;
	u32 tmap[256/32];
	int h1, h2;

	memset(tmap, 0, sizeof(tmap));

	for (h1 = 0; h1 < 256; h1++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(sht[h1]); s;
		     s = rtnl_dereference(s->next)) {
			for (h2 = 0; h2 <= 16; h2++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h2]); f;
				     f = rtnl_dereference(f->next)) {
					if (f->tunnelhdr == 0)
						continue;
					/* Mark the ID in the map being rebuilt,
					 * not in data->tmap, which is replaced
					 * wholesale below.
					 */
					data->tgenerator = f->res.classid;
					tmap[data->tgenerator >> 5] |=
						1 << (data->tgenerator & 0x1F);
				}
			}
		}
	}

	memcpy(data->tmap, tmap, sizeof(tmap));
}

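/* Allocate a fresh tunnel ID: advance tgenerator through 1..255 looking for
 * a free bit; if the whole range appears taken, rebuild the bitmap from the
 * live filters once and retry.
 */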
static u32 gen_tunnel(struct rsvp_head *data)
{
	int i, k;

	for (k = 0; k < 2; k++) {
		for (i = 255; i > 0; i--) {
			if (++data->tgenerator == 0)
				data->tgenerator = 1;
			if (tunnel_bts(data))
				return data->tgenerator;
		}
		tunnel_recycle(data);
	}
	return 0;
}

static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
				    .len = RSVP_DST_LEN * sizeof(u32) },
	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
};

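/* Create a new filter, or update the classid/actions of an existing one,
 * from the TCA_RSVP_* attributes validated by rsvp_policy above.  A new
 * filter joins the session matching its (dst, protocol, DPI, tunnelid),
 * creating the session first if none exists.  For orientation only, a
 * userspace request carrying these attributes looks roughly like the
 * following (assuming the iproute2 "rsvp" filter syntax; the exact options
 * are illustrative, not taken from this file):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 rsvp \
 *		ipproto udp session 239.1.1.1/5001 sender 10.0.0.2/5000 \
 *		classid 1:1
 */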
static int rsvp_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct rsvp_head *data = rtnl_dereference(tp->root);
	struct rsvp_filter *f, *nfp;
	struct rsvp_filter __rcu **fp;
	struct rsvp_session *nsp, *s;
	struct rsvp_session __rcu **sp;
	struct tc_rsvp_pinfo *pinfo = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_RSVP_MAX + 1];
	struct tcf_exts e;
	unsigned int h1, h2;
	__be32 *dst;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
					  NULL);
	if (err < 0)
		return err;

	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
				extack);
	if (err < 0)
		goto errout2;

	f = *arg;
	if (f) {
		/* Node exists: adjust only classid */
		struct rsvp_filter *n;

		if (f->handle != handle && handle)
			goto errout2;

		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
		if (!n) {
			err = -ENOMEM;
			goto errout2;
		}

		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
				    TCA_RSVP_POLICE);
		if (err < 0) {
			kfree(n);
			goto errout2;
		}

		if (tb[TCA_RSVP_CLASSID]) {
			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
			tcf_bind_filter(tp, &n->res, base);
		}

		tcf_exts_change(&n->exts, &e);
		rsvp_replace(tp, n, handle);
		return 0;
	}

	/* Now more serious part... */
	err = -EINVAL;
	if (handle)
		goto errout2;
	if (tb[TCA_RSVP_DST] == NULL)
		goto errout2;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout2;

	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
	if (err < 0)
		goto errout;
	h2 = 16;
	if (tb[TCA_RSVP_SRC]) {
		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
		h2 = hash_src(f->src);
	}
	if (tb[TCA_RSVP_PINFO]) {
		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
		f->spi = pinfo->spi;
		f->tunnelhdr = pinfo->tunnelhdr;
	}
	if (tb[TCA_RSVP_CLASSID])
		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);

	dst = nla_data(tb[TCA_RSVP_DST]);
	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);

	err = -ENOMEM;
	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
		goto errout;

	if (f->tunnelhdr) {
		err = -EINVAL;
		if (f->res.classid > 255)
			goto errout;

		err = -ENOMEM;
		if (f->res.classid == 0 &&
		    (f->res.classid = gen_tunnel(data)) == 0)
			goto errout;
	}

	for (sp = &data->ht[h1];
	     (s = rtnl_dereference(*sp)) != NULL;
	     sp = &s->next) {
		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
		    pinfo && pinfo->protocol == s->protocol &&
		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
		    dst[0] == s->dst[0] &&
		    dst[1] == s->dst[1] &&
		    dst[2] == s->dst[2] &&
#endif
		    pinfo->tunnelid == s->tunnelid) {

insert:
			/* OK, we found appropriate session */

			f->sess = s;
			if (f->tunnelhdr == 0)
				tcf_bind_filter(tp, &f->res, base);

			tcf_exts_change(&f->exts, &e);

			fp = &s->ht[h2];
			for (nfp = rtnl_dereference(*fp); nfp;
			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
				__u32 mask = nfp->spi.mask & f->spi.mask;

				if (mask != f->spi.mask)
					break;
			}
			RCU_INIT_POINTER(f->next, nfp);
			rcu_assign_pointer(*fp, f);

			*arg = f;
			return 0;
		}
	}

	/* No session found. Create new one. */

	err = -ENOBUFS;
	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
	if (s == NULL)
		goto errout;
	memcpy(s->dst, dst, sizeof(s->dst));

	if (pinfo) {
		s->dpi = pinfo->dpi;
		s->protocol = pinfo->protocol;
		s->tunnelid = pinfo->tunnelid;
	}
	sp = &data->ht[h1];
	for (nsp = rtnl_dereference(*sp); nsp;
	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
			break;
	}
	RCU_INIT_POINTER(s->next, nsp);
	rcu_assign_pointer(*sp, s);

	goto insert;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
errout2:
	tcf_exts_destroy(&e);
	return err;
}

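/* Walk every installed filter for dump/iteration purposes, maintaining the
 * skip/count bookkeeping in the walker and stopping early if the callback
 * asks for it.
 */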
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct rsvp_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (arg->stop)
		return;

	for (h = 0; h < 256; h++) {
		struct rsvp_session *s;

		for (s = rtnl_dereference(head->ht[h]); s;
		     s = rtnl_dereference(s->next)) {
			for (h1 = 0; h1 <= 16; h1++) {
				struct rsvp_filter *f;

				for (f = rtnl_dereference(s->ht[h1]); f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct rsvp_filter *f = fh;
	struct rsvp_session *s;
	struct nlattr *nest;
	struct tc_rsvp_pinfo pinfo;

	if (f == NULL)
		return skb->len;
	s = f->sess;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
		goto nla_put_failure;
	pinfo.dpi = s->dpi;
	pinfo.spi = f->spi;
	pinfo.protocol = s->protocol;
	pinfo.tunnelid = s->tunnelid;
	pinfo.tunnelhdr = f->tunnelhdr;
	pinfo.pad = 0;
	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
		goto nla_put_failure;
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (((f->handle >> 8) & 0xFF) != 16 &&
	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

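/* If this filter is bound to the class identified by @classid, refresh its
 * cached class pointer (called when that class is replaced or removed).
 */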
static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct rsvp_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops RSVP_OPS __read_mostly = {
	.kind		= RSVP_ID,
	.classify	= rsvp_classify,
	.init		= rsvp_init,
	.destroy	= rsvp_destroy,
	.get		= rsvp_get,
	.change		= rsvp_change,
	.delete		= rsvp_delete,
	.walk		= rsvp_walk,
	.dump		= rsvp_dump,
	.bind_class	= rsvp_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_rsvp(void)
{
	return register_tcf_proto_ops(&RSVP_OPS);
}

static void __exit exit_rsvp(void)
{
	unregister_tcf_proto_ops(&RSVP_OPS);
}

module_init(init_rsvp)
module_exit(exit_rsvp)