/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */
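
/*
 * Illustrative usage from userspace (device and IDs are placeholders,
 * not taken from this file):
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2 pass_on
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 1 tcindex classid 1:1
 *
 * The classifier uses (skb->tc_index & mask) >> shift as the lookup key;
 * "handle" selects the key a filter element is attached to.
 */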

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


#define PRIV(tp)	((struct tcindex_data *) (tp)->root)


struct tcindex_filter_result {
	struct tcf_exts exts;
	struct tcf_result res;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter *next;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
				      NULL if unused */
	u16 mask;		/* AND key with mask */
	int shift;		/* shift ANDed key to the right */
	int hash;		/* hash table size; 0 if undefined */
	int alloc_hash;		/* allocated size */
	int fall_through;	/* 0: only classify if explicit match */
};

static const struct tcf_ext_map tcindex_ext_map = {
	.police = TCA_TCINDEX_POLICE,
	.action = TCA_TCINDEX_ACT
};

static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
	struct tcindex_filter *f;

	if (p->perfect)
		return tcindex_filter_is_set(p->perfect + key) ?
			p->perfect + key : NULL;
	else if (p->h) {
		for (f = p->h[key % p->hash]; f; f = f->next)
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


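/*
 * Classification: the key is (skb->tc_index & p->mask) >> p->shift.
 * For example, with mask 0xf0 and shift 4, tc_index 0x2a yields key
 * (0x2a & 0xf0) >> 4 = 2.  On a lookup miss with fall_through set, a
 * class ID is synthesized from the qdisc's major number and the key.
 */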
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}


static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
	pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}


static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	tp->root = p;
	return 0;
}


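/*
 * The lock argument tells us whether to take tcf_tree_lock() while
 * unlinking: tcindex_delete() passes 1, while the walker-based
 * tcindex_destroy() path passes 0 since the whole table is being torn
 * down anyway.
 */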
static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;
		struct tcindex_filter **walk = NULL;

		for (i = 0; i < p->hash; i++)
			for (walk = p->h + i; *walk; walk = &(*walk)->next)
				if (&(*walk)->result == r)
					goto found;
		return -ENOENT;

found:
		f = *walk;
		if (lock)
			tcf_tree_lock(tp);
		*walk = f->next;
		if (lock)
			tcf_tree_unlock(tp);
	}
	tcf_unbind_filter(tp, &r->res);
	tcf_exts_destroy(tp, &r->exts);
	kfree(f);
	return 0;
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	return __tcindex_delete(tp, arg, 1);
}

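/*
 * A perfect hash is usable only when the table has one slot for every
 * possible masked-and-shifted key, i.e. hash > mask >> shift.
 */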
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

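/*
 * Parameter changes work on private copies (cp, cr) that are validated
 * and allocated first, then committed under tcf_tree_lock() so that
 * concurrent lookups never observe a half-updated table.
 */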
static int
tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
		  struct tcindex_data *p, struct tcindex_filter_result *r,
		  struct nlattr **tb, struct nlattr *est)
{
	int err, balloc = 0;
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data cp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
	if (err < 0)
		return err;

	memcpy(&cp, p, sizeof(cp));
	memset(&new_filter_result, 0, sizeof(new_filter_result));

	if (old_r)
		memcpy(&cr, r, sizeof(cr));
	else
		memset(&cr, 0, sizeof(cr));

	if (tb[TCA_TCINDEX_HASH])
		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;
	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp.perfect) {
		if (!valid_perfect_hash(&cp) ||
		    cp.hash > cp.alloc_hash)
			goto errout;
	} else if (cp.h && cp.hash != cp.alloc_hash)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp.hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
			cp.hash = (cp.mask >> cp.shift) + 1;
		else
			cp.hash = DEFAULT_HASH_SIZE;
	}

	if (!cp.perfect && !cp.h)
		cp.alloc_hash = cp.hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp.perfect || valid_perfect_hash(&cp))
		if (handle >= cp.alloc_hash)
			goto errout;


	err = -ENOMEM;
	if (!cp.perfect && !cp.h) {
		if (valid_perfect_hash(&cp)) {
			cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
			if (!cp.perfect)
				goto errout;
			balloc = 1;
		} else {
			cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
			if (!cp.h)
				goto errout;
			balloc = 2;
		}
	}

	if (cp.perfect)
		r = cp.perfect + handle;
	else
		r = tcindex_lookup(&cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	tcf_exts_change(tp, &cr.exts, &e);

	tcf_tree_lock(tp);
	if (old_r && old_r != r)
		memset(old_r, 0, sizeof(*old_r));

	memcpy(p, &cp, sizeof(cp));
	memcpy(r, &cr, sizeof(cr));

	if (r == &new_filter_result) {
		struct tcindex_filter **fp;

		f->key = handle;
		f->result = new_filter_result;
		f->next = NULL;
		for (fp = p->h + (handle % p->hash); *fp; fp = &(*fp)->next)
			/* nothing */;
		*fp = f;
	}
	tcf_tree_unlock(tp);

	return 0;

errout_alloc:
	if (balloc == 1)
		kfree(cp.perfect);
	else if (balloc == 2)
		kfree(cp.h);
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

static int
tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg 0x%lx\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]);
}


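/*
 * Walker contract: skip the first walker->skip set entries, invoke
 * walker->fn for each subsequent one, and stop (setting walker->stop)
 * as soon as fn returns a negative value.
 */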
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect + i), walker)
				    < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = p->h[i]; f; f = next) {
			next = f->next;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
				    walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}


static int tcindex_destroy_element(struct tcf_proto *tp,
				   unsigned long arg, struct tcf_walker *walker)
{
	return __tcindex_delete(tp, arg, 0);
}


static void tcindex_destroy(struct tcf_proto *tp)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcf_walker walker;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = &tcindex_destroy_element;
	tcindex_walk(tp, &walker);
	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
	tp->root = NULL;
}


static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = PRIV(tp);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
		 tp, fh, skb, t, p, r, b);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				for (f = p->h[i]; !t->tcm_handle && f;
				     f = f->next) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.put		= tcindex_put,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_filter_result {
	struct tcf_exts exts;
	struct tcf_result res;
	struct rcu_head rcu;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_head rcu;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_head rcu;
};
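
/*
 * Lifetime rules: the datapath dereferences tp->root under RCU
 * (rcu_dereference_bh), while configuration changes build a new
 * tcindex_data and publish it with rcu_assign_pointer(); the old
 * structure and filter extensions are freed via call_rcu() below.
 */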

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}


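/*
 * Datapath entry point: runs in softirq context under the RCU
 * read-side lock, hence the rcu_dereference_bh() of tp->root.
 */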
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return 0;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

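/*
 * Freeing is deferred through call_rcu() so that in-flight readers can
 * finish with a filter result (and its extensions) before it goes away.
 */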
static void tcindex_destroy_rexts(struct rcu_head *head)
{
	struct tcindex_filter_result *r;

	r = container_of(head, struct tcindex_filter_result, rcu);
	tcf_exts_destroy(&r->exts);
}

static void tcindex_destroy_fexts(struct rcu_head *head)
{
	struct tcindex_filter *f = container_of(head, struct tcindex_filter,
						rcu);

	tcf_exts_destroy(&f->result.exts);
	kfree(f);
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f)
		call_rcu(&f->rcu, tcindex_destroy_fexts);
	else
		call_rcu(&r->rcu, tcindex_destroy_rexts);
	return 0;
}

static int tcindex_destroy_element(struct tcf_proto *tp,
				   unsigned long arg,
				   struct tcf_walker *walker)
{
	return tcindex_delete(tp, arg);
}

static void __tcindex_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}

static void __tcindex_partial_destroy(struct rcu_head *head)
{
	struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

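/*
 * Update strategy: build a complete replacement tcindex_data (cp),
 * copy or re-create the hash tables, publish it with
 * rcu_assign_pointer(tp->root, cp), and release the old structure via
 * call_rcu() once readers are done.
 */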
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_filter_result cr;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result);
	if (err < 0)
		goto errout1;
	err = tcindex_filter_result_init(&cr);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr.res = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr.res, base);
	}

	if (old_r)
		tcf_exts_change(tp, &r->exts, &e);
	else
		tcf_exts_change(tp, &cr.exts, &e);

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr.res;
	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		tcf_exts_change(tp, &f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	}

	if (oldp)
		call_rcu(&oldp->rcu, __tcindex_partial_destroy);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&cr.exts);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, unsigned long *arg, bool ovr)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg 0x%lx\n",
		 tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

	if (!opt)
		return 0;

	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp,
				    (unsigned long) (p->perfect + i), walker)
				    < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, (unsigned long) &f->result,
				    walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static bool tcindex_destroy(struct tcf_proto *tp, bool force)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcf_walker walker;

	if (!force)
		return false;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
	walker.count = 0;
	walker.skip = 0;
	walker.fn = tcindex_destroy_element;
	tcindex_walk(tp, &walker);

	call_rcu(&p->rcu, __tcindex_destroy);
	return true;
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *t)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		= "tcindex",
	.classify	= tcindex_classify,
	.init		= tcindex_init,
	.destroy	= tcindex_destroy,
	.get		= tcindex_get,
	.change		= tcindex_change,
	.delete		= tcindex_delete,
	.walk		= tcindex_walk,
	.dump		= tcindex_dump,
	.owner		= THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");