v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

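/*
 * The classifier key is derived in tcindex_classify() as
 * (skb->tc_index & mask) >> shift, so the largest possible key is
 * mask >> shift. When that bound stays below PERFECT_HASH_THRESHOLD,
 * a "perfect" hash (a flat array indexed directly by the key) is used;
 * otherwise entries go into a bucketed hash of DEFAULT_HASH_SIZE.
 */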
struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	struct rcu_work rwork;
};
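/*
 * "perfect" and "h" are alternatives: the perfect hash is a flat array
 * of tcindex_filter_result indexed directly by key, while the imperfect
 * hash is an array of RCU-protected singly linked lists, bucketed by
 * key % hash (see tcindex_lookup() below).
 */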

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}
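/*
 * rcu_dereference_bh_rtnl() lets tcindex_lookup() serve both callers:
 * the datapath, which runs under rcu_read_lock_bh(), and control-path
 * code such as tcindex_get(), which holds the RTNL lock instead.
 */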

static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
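/*
 * With fall_through enabled (the default), an unmatched key is still
 * classified: the key itself becomes the minor number of the returned
 * class ID under the qdisc's major handle. This is what lets e.g. a
 * dsmark qdisc map DSCP-derived tc_index values onto classes without
 * configuring one filter per value.
 */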


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}
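/*
 * Note the asymmetry above: an imperfect-hash filter (f != NULL) is
 * unlinked from its bucket and freed, while a perfect-hash entry cannot
 * be unlinked from the array, so only its result is torn down and the
 * slot simply becomes "unset". Either way the extensions are destroyed
 * after an RCU grace period via tcf_queue_work().
 */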

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p->h);
	kfree(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return  p->hash > (p->mask >> p->shift);
}
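/*
 * A perfect hash is only valid if the table has more slots than the
 * largest key the mask/shift pair can produce, i.e. indexing
 * p->perfect[key] can never run past the allocation.
 */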

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	kfree(p->perfect);
	kfree(p);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		for (i = 0; i < cp->hash; i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, net);
	if (err < 0)
		goto errout1;
	if (old_r)
		cr = r->res;

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT])
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
errout1:
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}
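/*
 * Typical configuration from user space (illustrative; see tc-tcindex(8)
 * for the full syntax): the filter handle doubles as the lookup key.
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *       handle 3 tcindex mask 0xff shift 0 classid 1:3 pass_on
 *
 * Here tc_index values masked to 3 map to class 1:3, and pass_on
 * (fall_through 0) hands unmatched keys to the next filter instead of
 * mapping them algorithmically.
 */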

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg %p\n",
	    tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid)
		r->res.class = cl;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/tc_wrapper.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */


struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};


struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}
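/*
 * The refcnt keeps struct tcindex_data alive until every queued
 * tcindex_filter_result destruction has run: perfect-hash results live
 * inside the array owned by tcindex_data, so freeing it before the
 * per-result RCU work completes would be a use-after-free. See the
 * comment in tcindex_destroy() below.
 */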

static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

TC_INDIRECT_SCOPE int tcindex_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}
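/*
 * TC_INDIRECT_SCOPE comes from <net/tc_wrapper.h>, new in this version:
 * with retpoline mitigations enabled the symbol is given global scope so
 * the wrapper in tcf_classify() can call it directly rather than through
 * an indirect function pointer; otherwise it expands to "static".
 */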


static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);

		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return  p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;
	bool update_h = false;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT]) {
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
		if (cp->shift > 16) {
			err = -EINVAL;
			goto errout;
		}
	}
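	/*
	 * mask is a u16, so any shift beyond 16 would reduce every key to
	 * zero; this version rejects such configurations outright instead
	 * of accepting a filter that can only ever match key 0.
	 */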
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect) {
		r = cp->perfect + handle;
	} else {
		/* imperfect area is updated in-place using rcu */
		update_h = !!tcindex_lookup(cp, handle);
		r = &new_filter_result;
	}

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;
		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

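	/*
	 * Unlike the v5.4 code, an existing imperfect-hash entry is never
	 * modified in place here: a freshly allocated filter is spliced
	 * into the bucket with rcu_replace_pointer() and the old one is
	 * freed only after an RCU grace period, so concurrent readers
	 * always see a complete filter.
	 */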
	if (update_h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *cf;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		/* imperfect area bucket */
		fp = cp->h + (handle % cp->hash);

		/* lookup the filter, guaranteed to exist */
		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
			if (cf->key == (u16)handle)
				break;

		f->next = cf->next;

		cf = rcu_replace_pointer(*fp, f, 1);
		tcf_exts_get_net(&cf->result.exts);
		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
	} else if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
				; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, u32 flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
	    "p %p,r %p,*arg %p\n",
	    tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], flags, extack);
}

static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (!tc_cls_stats_dump(tp, walker, p->perfect + i))
				return;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (!tc_cls_stats_dump(tp, walker, &f->result))
				return;
		}
	}
}

static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}


static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	tc_cls_bind_class(classid, cl, q, &r->res, base);
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");