// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
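
/*
 * Big picture: the classifier maps the 16-bit skb->tc_index to a key as
 *
 *	key = (skb->tc_index & mask) >> shift
 *
 * If the key space (mask >> shift) stays below PERFECT_HASH_THRESHOLD,
 * results live in a "perfect" hash: a plain array indexed directly by
 * key. Otherwise entries are chained in an "imperfect" hash table of
 * p->hash buckets. Illustrative iproute2 usage (hypothetical device and
 * class numbers):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 1 tcindex mask 0xfff shift 0 classid 1:1
 */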

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

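/*
 * Fast path. On a lookup miss with fall_through set (the default), a
 * classid is synthesized from the qdisc's major number and the key, so
 * e.g. key 3 under qdisc 8001: maps to class 8001:3. With fall_through
 * cleared ("pass_on" in iproute2), a miss returns -1 and the qdisc moves
 * on to the next filter.
 */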
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

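/*
 * Teardown helpers. Destroying extensions/actions may sleep and needs
 * RTNL, so when the exts still hold a netns reference (tcf_exts_get_net()
 * succeeded), destruction is deferred to a workqueue after an RCU grace
 * period via tcf_queue_work(); during netns dismantle it runs
 * synchronously instead.
 */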
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

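/*
 * Update path. Parameters must look atomic to the lockless classify path,
 * so a complete copy (cp) of tcindex_data is built, the netlink
 * attributes are applied and validated against it, and only then is it
 * published with rcu_assign_pointer(tp->root, cp). The old copy is freed
 * after a grace period via tcf_queue_work(). The imperfect hash table
 * (cp->h) is shared with the old copy rather than duplicated, which is
 * why tcindex_partial_destroy_work() frees only the perfect hash.
 */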
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, net);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;

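	/* For example, assuming mask 0xf000 and shift 12: mask >> shift is
	 * 0xf, so the perfect hash gets 16 slots and any handle above 15 is
	 * rejected here until the mask is widened.
	 */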

	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

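/*
 * Walk every set result: perfect-hash slots are visited by index
 * (skipping unbound ones), then each imperfect-hash bucket chain is
 * traversed.
 */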
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}

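/*
 * Dump either the template parameters (fh == NULL) or one filter. For an
 * imperfect hash the handle is not stored in the result itself, so it is
 * recovered by scanning the buckets for the matching entry.
 */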
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

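/*
 * Invoked through tcf_proto_ops->bind_class when a class's internal
 * handle changes (e.g. on class deletion), so that cached res.class
 * pointers are updated instead of going stale.
 */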
static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.bind_class	= tcindex_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");