// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
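
/*
 * Hash flavour selection (see tcindex_set_parms() below): when no table
 * size is given, a "perfect" hash -- a plain array indexed directly by
 * the masked and shifted key -- is used whenever the largest possible
 * key stays below PERFECT_HASH_THRESHOLD.  For example (values purely
 * illustrative): mask 0xf0 with shift 4 bounds the key at
 * 0xf0 >> 4 = 15 < 64, so a 16-entry perfect hash is allocated, while
 * mask 0xffff with shift 0 exceeds the threshold and falls back to a
 * DEFAULT_HASH_SIZE-bucket chained hash indexed by (key % hash).
 */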


struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; only used if !perfect */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

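/*
 * The datapath key is derived from skb->tc_index as
 * key = (tc_index & mask) >> shift (see tcindex_classify()).  In a
 * typical diffserv-style setup (illustrative values, not mandated by
 * this code) mask 0xfc and shift 2 select the DSCP bits, so a tc_index
 * of 0xb8 yields key (0xb8 & 0xfc) >> 2 = 0x2e, i.e. DSCP EF.
 */
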
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

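/*
 * Lifetime of tcindex_data: tcindex_init() starts the refcnt at 1 and
 * that base reference is dropped from tcindex_destroy_work().  Extra
 * short-lived references are taken per perfect hash element during
 * teardown (see tcindex_destroy()), so 'p' cannot be freed before the
 * last tcindex_destroy_rexts_work() instance has run.
 */
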
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

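/*
 * Lookup is a direct O(1) array index for the perfect hash and a linear
 * chain walk for the imperfect one.  rcu_dereference_bh_rtnl() lets the
 * same helper run both from the BH datapath (tcindex_classify()) and
 * from control-path callers that hold the RTNL lock instead
 * (tcindex_get(), tcindex_set_parms()).
 */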

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}


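/*
 * Worked fall_through example (illustrative values): on a qdisc with
 * handle 1:0, an unmatched key of 5 maps straight to class 1:5 via
 * TC_H_MAKE(TC_H_MAJ(q->handle), key) above.  With fall_through
 * disabled the packet is reported unclassified (-1) instead.
 */
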
static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

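/*
 * tcindex has no separate handle space: the handle *is* the (at most
 * 16-bit) key, which is why tcindex_get() can feed it straight to
 * tcindex_lookup() and why tcindex_dump() recovers tcm_handle from
 * f->key or from the element's offset into the perfect hash array.
 */
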
static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

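/*
 * Extension teardown must happen after an RCU grace period, since the
 * datapath may still be executing the attached actions (see the comment
 * in tcindex_delete() below).  tcf_queue_work() defers the *_work()
 * handlers above until that grace period has elapsed; the handlers then
 * retake the RTNL lock around the actual destruction.
 */
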
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

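/*
 * A perfect hash needs one slot per reachable key.  The largest key the
 * datapath can produce is (mask >> shift), so the table is valid only
 * when hash > (mask >> shift), i.e. at least (mask >> shift) + 1
 * entries -- exactly the size tcindex_set_parms() picks when no
 * explicit hash size was supplied.
 */
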
static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	kfree(p->perfect);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

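/*
 * The perfect hash is allocated zeroed; every element gets its own
 * tcf_exts state plus a back pointer to the owning tcindex_data, and on
 * any tcf_exts_init() failure the whole array is released again through
 * tcindex_free_perfect_hash() above.
 */
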
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

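/*
 * For orientation, a typical (purely illustrative) tc(8) sequence that
 * exercises the code above, in the classic diffserv style:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex classid 1:1
 *
 * The first command sets the per-tp parameters parsed from
 * TCA_TCINDEX_MASK/TCA_TCINDEX_SHIFT; the second installs the element
 * for key 0x2e and binds it to class 1:1 via TCA_TCINDEX_CLASSID.
 */
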
static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

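/*
 * tcindex_bind_class() above lets the qdisc layer re-bind or un-bind a
 * filter's cached class reference when the class identified by
 * 'classid' is replaced or removed, so r->res.class never points at a
 * stale class.
 */
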
static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");