v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
  4 *
  5 * Written 1998,1999 by Werner Almesberger, EPFL ICA
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/types.h>
 10#include <linux/kernel.h>
 11#include <linux/skbuff.h>
 12#include <linux/errno.h>
 13#include <linux/slab.h>
 14#include <net/act_api.h>
 15#include <net/netlink.h>
 16#include <net/pkt_cls.h>
 17#include <net/sch_generic.h>
 18
 19/*
 20 * Passing parameters to the root seems to be done more awkwardly than really
 21 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 22 * verified. FIXME.
 23 */
 24
 25#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
 26#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
 27
 28
 29struct tcindex_filter_result {
 30	struct tcf_exts		exts;
 31	struct tcf_result	res;
 32	struct rcu_work		rwork;
 33};
 34
 35struct tcindex_filter {
 36	u16 key;
 37	struct tcindex_filter_result result;
 38	struct tcindex_filter __rcu *next;
 39	struct rcu_work rwork;
 40};
 41
 42
 43struct tcindex_data {
 44	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
 45	struct tcindex_filter __rcu **h; /* imperfect hash; */
 46	struct tcf_proto *tp;
 47	u16 mask;		/* AND key with mask */
 48	u32 shift;		/* shift ANDed key to the right */
 49	u32 hash;		/* hash table size; 0 if undefined */
 50	u32 alloc_hash;		/* allocated size */
 51	u32 fall_through;	/* 0: only classify if explicit match */
 52	struct rcu_work rwork;
 53};
 54
 55static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
 56{
 57	return tcf_exts_has_actions(&r->exts) || r->res.classid;
 58}
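/* Annotation, not part of the original file: a perfect-hash slot is just
 * an array entry, so an all-zero tcindex_filter_result must read as "no
 * filter here". The helper above encodes exactly that: a result counts
 * as set only once it carries actions or a non-zero classid.
 */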
 59
 60static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
 61						    u16 key)
 62{
 63	if (p->perfect) {
 64		struct tcindex_filter_result *f = p->perfect + key;
 65
 66		return tcindex_filter_is_set(f) ? f : NULL;
 67	} else if (p->h) {
 68		struct tcindex_filter __rcu **fp;
 69		struct tcindex_filter *f;
 70
 71		fp = &p->h[key % p->hash];
 72		for (f = rcu_dereference_bh_rtnl(*fp);
 73		     f;
 74		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
 75			if (f->key == key)
 76				return &f->result;
 77	}
 78
 79	return NULL;
 80}
 81
 82
 83static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 84			    struct tcf_result *res)
 85{
 86	struct tcindex_data *p = rcu_dereference_bh(tp->root);
 87	struct tcindex_filter_result *f;
 88	int key = (skb->tc_index & p->mask) >> p->shift;
 89
 90	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
 91		 skb, tp, res, p);
 92
 93	f = tcindex_lookup(p, key);
 94	if (!f) {
 95		struct Qdisc *q = tcf_block_q(tp->chain->block);
 96
 97		if (!p->fall_through)
 98			return -1;
 99		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
100		res->class = 0;
101		pr_debug("alg 0x%x\n", res->classid);
102		return 0;
103	}
104	*res = f->res;
105	pr_debug("map 0x%x\n", res->classid);
106
107	return tcf_exts_exec(skb, &f->exts, res);
108}
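/* Annotation with illustrative values, not part of the original file:
 * with the diffserv-style parameters mask = 0xfc and shift = 2, a
 * tc_index holding the DS byte reduces to its DSCP before the lookup:
 *
 *	u16 tc_index = 0xb8;			(DS byte of the EF PHB)
 *	int key = (tc_index & 0xfc) >> 2;	(key = 0x2e, i.e. DSCP 46)
 *
 * When nothing matches and fall_through is set, the key itself becomes
 * the minor number of the classid handed back to the qdisc.
 */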
109
110
111static void *tcindex_get(struct tcf_proto *tp, u32 handle)
112{
113	struct tcindex_data *p = rtnl_dereference(tp->root);
114	struct tcindex_filter_result *r;
115
116	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
117	if (p->perfect && handle >= p->alloc_hash)
118		return NULL;
119	r = tcindex_lookup(p, handle);
120	return r && tcindex_filter_is_set(r) ? r : NULL;
121}
122
123static int tcindex_init(struct tcf_proto *tp)
124{
125	struct tcindex_data *p;
126
127	pr_debug("tcindex_init(tp %p)\n", tp);
128	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
129	if (!p)
130		return -ENOMEM;
131
132	p->mask = 0xffff;
133	p->hash = DEFAULT_HASH_SIZE;
134	p->fall_through = 1;
135
136	rcu_assign_pointer(tp->root, p);
137	return 0;
138}
139
140static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
141{
142	tcf_exts_destroy(&r->exts);
143	tcf_exts_put_net(&r->exts);
144}
145
146static void tcindex_destroy_rexts_work(struct work_struct *work)
147{
148	struct tcindex_filter_result *r;
149
150	r = container_of(to_rcu_work(work),
151			 struct tcindex_filter_result,
152			 rwork);
153	rtnl_lock();
154	__tcindex_destroy_rexts(r);
155	rtnl_unlock();
156}
157
158static void __tcindex_destroy_fexts(struct tcindex_filter *f)
159{
160	tcf_exts_destroy(&f->result.exts);
161	tcf_exts_put_net(&f->result.exts);
162	kfree(f);
163}
164
165static void tcindex_destroy_fexts_work(struct work_struct *work)
166{
167	struct tcindex_filter *f = container_of(to_rcu_work(work),
168						struct tcindex_filter,
169						rwork);
170
171	rtnl_lock();
172	__tcindex_destroy_fexts(f);
173	rtnl_unlock();
174}
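/* Annotation, not part of the original file: both destroy paths defer
 * through tcf_queue_work() whenever tcf_exts_get_net() succeeds, so
 * tcf_exts_destroy() runs only after an RCU grace period; concurrent
 * readers that still hold a pointer to the old extensions remain safe.
 */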
175
176static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
177			  bool rtnl_held, struct netlink_ext_ack *extack)
178{
179	struct tcindex_data *p = rtnl_dereference(tp->root);
180	struct tcindex_filter_result *r = arg;
181	struct tcindex_filter __rcu **walk;
182	struct tcindex_filter *f = NULL;
183
184	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
185	if (p->perfect) {
186		if (!r->res.class)
187			return -ENOENT;
188	} else {
189		int i;
190
191		for (i = 0; i < p->hash; i++) {
192			walk = p->h + i;
193			for (f = rtnl_dereference(*walk); f;
194			     walk = &f->next, f = rtnl_dereference(*walk)) {
195				if (&f->result == r)
196					goto found;
197			}
198		}
199		return -ENOENT;
200
201found:
202		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
203	}
204	tcf_unbind_filter(tp, &r->res);
205	/* all classifiers are required to call tcf_exts_destroy() after rcu
206	 * grace period, since converted-to-rcu actions are relying on that
207	 * in cleanup() callback
208	 */
209	if (f) {
210		if (tcf_exts_get_net(&f->result.exts))
211			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
212		else
213			__tcindex_destroy_fexts(f);
214	} else {
215		if (tcf_exts_get_net(&r->exts))
216			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
217		else
218			__tcindex_destroy_rexts(r);
219	}
220
221	*last = false;
222	return 0;
223}
224
225static void tcindex_destroy_work(struct work_struct *work)
226{
227	struct tcindex_data *p = container_of(to_rcu_work(work),
228					      struct tcindex_data,
229					      rwork);
230
231	kfree(p->perfect);
232	kfree(p->h);
233	kfree(p);
234}
235
236static inline int
237valid_perfect_hash(struct tcindex_data *p)
238{
239	return  p->hash > (p->mask >> p->shift);
240}
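/* Annotation, not part of the original file: the largest key that
 * tcindex_classify() can compute is (mask >> shift), so a perfect hash
 * is valid only if the table has more slots than that maximum value;
 * every possible key then indexes the array directly, with no
 * collision chains.
 */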
241
242static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
243	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
244	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
245	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
246	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
247	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
248};
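/* Annotation, assumed mapping, not part of the original file: these
 * attributes carry the tc(8) tcindex options "hash", "mask", "shift",
 * "fall_through"/"pass_on" and "classid"; nla_parse_nested_deprecated()
 * below validates each payload against the declared type.
 */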
249
250static int tcindex_filter_result_init(struct tcindex_filter_result *r,
251				      struct net *net)
252{
253	memset(r, 0, sizeof(*r));
254	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
255			     TCA_TCINDEX_POLICE);
256}
257
258static void tcindex_partial_destroy_work(struct work_struct *work)
259{
260	struct tcindex_data *p = container_of(to_rcu_work(work),
261					      struct tcindex_data,
262					      rwork);
263
264	kfree(p->perfect);
265	kfree(p);
266}
267
268static void tcindex_free_perfect_hash(struct tcindex_data *cp)
269{
270	int i;
271
272	for (i = 0; i < cp->hash; i++)
273		tcf_exts_destroy(&cp->perfect[i].exts);
274	kfree(cp->perfect);
275}
276
277static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
278{
279	int i, err = 0;
280
281	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
282			      GFP_KERNEL);
283	if (!cp->perfect)
284		return -ENOMEM;
285
286	for (i = 0; i < cp->hash; i++) {
287		err = tcf_exts_init(&cp->perfect[i].exts, net,
288				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
289		if (err < 0)
290			goto errout;
291	}
292
293	return 0;
294
295errout:
296	tcindex_free_perfect_hash(cp);
297	return err;
298}
299
300static int
301tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
302		  u32 handle, struct tcindex_data *p,
303		  struct tcindex_filter_result *r, struct nlattr **tb,
304		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
305{
306	struct tcindex_filter_result new_filter_result, *old_r = r;
307	struct tcindex_data *cp = NULL, *oldp;
308	struct tcindex_filter *f = NULL; /* make gcc behave */
309	struct tcf_result cr = {};
310	int err, balloc = 0;
311	struct tcf_exts e;
312
313	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
314	if (err < 0)
315		return err;
316	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
317	if (err < 0)
318		goto errout;
319
320	err = -ENOMEM;
321	/* tcindex_data attributes must look atomic to classifier/lookup so
322	 * allocate new tcindex data and RCU assign it onto root. Keeping
323	 * perfect hash and hash pointers from old data.
324	 */
325	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
326	if (!cp)
327		goto errout;
328
329	cp->mask = p->mask;
330	cp->shift = p->shift;
331	cp->hash = p->hash;
332	cp->alloc_hash = p->alloc_hash;
333	cp->fall_through = p->fall_through;
334	cp->tp = tp;
335
336	if (p->perfect) {
337		int i;
338
339		if (tcindex_alloc_perfect_hash(net, cp) < 0)
340			goto errout;
341		for (i = 0; i < cp->hash; i++)
342			cp->perfect[i].res = p->perfect[i].res;
343		balloc = 1;
344	}
345	cp->h = p->h;
346
347	err = tcindex_filter_result_init(&new_filter_result, net);
348	if (err < 0)
349		goto errout1;
350	if (old_r)
351		cr = r->res;
352
353	if (tb[TCA_TCINDEX_HASH])
354		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
355
356	if (tb[TCA_TCINDEX_MASK])
357		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
358
359	if (tb[TCA_TCINDEX_SHIFT])
360		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
361
362	err = -EBUSY;
363
364	/* Hash already allocated, make sure that we still meet the
365	 * requirements for the allocated hash.
366	 */
367	if (cp->perfect) {
368		if (!valid_perfect_hash(cp) ||
369		    cp->hash > cp->alloc_hash)
370			goto errout_alloc;
371	} else if (cp->h && cp->hash != cp->alloc_hash) {
372		goto errout_alloc;
373	}
374
375	err = -EINVAL;
376	if (tb[TCA_TCINDEX_FALL_THROUGH])
377		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
378
379	if (!cp->hash) {
380		/* Hash not specified, use perfect hash if the upper limit
381		 * of the hashing index is below the threshold.
382		 */
383		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
384			cp->hash = (cp->mask >> cp->shift) + 1;
385		else
386			cp->hash = DEFAULT_HASH_SIZE;
387	}
388
389	if (!cp->perfect && !cp->h)
390		cp->alloc_hash = cp->hash;
391
392	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
393	 * but then, we'd fail handles that may become valid after some future
394	 * mask change. While this is extremely unlikely to ever matter,
395	 * the check below is safer (and also more backwards-compatible).
396	 */
397	if (cp->perfect || valid_perfect_hash(cp))
398		if (handle >= cp->alloc_hash)
399			goto errout_alloc;
400
401
402	err = -ENOMEM;
403	if (!cp->perfect && !cp->h) {
404		if (valid_perfect_hash(cp)) {
405			if (tcindex_alloc_perfect_hash(net, cp) < 0)
406				goto errout_alloc;
407			balloc = 1;
408		} else {
409			struct tcindex_filter __rcu **hash;
410
411			hash = kcalloc(cp->hash,
412				       sizeof(struct tcindex_filter *),
413				       GFP_KERNEL);
414
415			if (!hash)
416				goto errout_alloc;
417
418			cp->h = hash;
419			balloc = 2;
420		}
421	}
422
423	if (cp->perfect)
424		r = cp->perfect + handle;
425	else
426		r = tcindex_lookup(cp, handle) ? : &new_filter_result;
427
428	if (r == &new_filter_result) {
429		f = kzalloc(sizeof(*f), GFP_KERNEL);
430		if (!f)
431			goto errout_alloc;
432		f->key = handle;
433		f->next = NULL;
434		err = tcindex_filter_result_init(&f->result, net);
435		if (err < 0) {
436			kfree(f);
437			goto errout_alloc;
438		}
439	}
440
441	if (tb[TCA_TCINDEX_CLASSID]) {
442		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
443		tcf_bind_filter(tp, &cr, base);
444	}
445
446	if (old_r && old_r != r) {
447		err = tcindex_filter_result_init(old_r, net);
448		if (err < 0) {
449			kfree(f);
450			goto errout_alloc;
451		}
452	}
453
454	oldp = p;
455	r->res = cr;
456	tcf_exts_change(&r->exts, &e);
457
458	rcu_assign_pointer(tp->root, cp);
459
460	if (r == &new_filter_result) {
461		struct tcindex_filter *nfp;
462		struct tcindex_filter __rcu **fp;
463
464		f->result.res = r->res;
465		tcf_exts_change(&f->result.exts, &r->exts);
466
467		fp = cp->h + (handle % cp->hash);
468		for (nfp = rtnl_dereference(*fp);
469		     nfp;
470		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
471				; /* nothing */
472
473		rcu_assign_pointer(*fp, f);
474	} else {
475		tcf_exts_destroy(&new_filter_result.exts);
476	}
477
478	if (oldp)
479		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
480	return 0;
481
482errout_alloc:
483	if (balloc == 1)
484		tcindex_free_perfect_hash(cp);
485	else if (balloc == 2)
486		kfree(cp->h);
487errout1:
488	tcf_exts_destroy(&new_filter_result.exts);
489errout:
490	kfree(cp);
491	tcf_exts_destroy(&e);
492	return err;
493}
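/* Annotation, not part of the original file: the function above is a
 * copy-on-update scheme, in outline:
 *
 *	cp = kzalloc(...);			shadow tcindex_data
 *	(copy p's parameters, apply tb[] changes to cp)
 *	rcu_assign_pointer(tp->root, cp);	publish atomically
 *	tcf_queue_work(&oldp->rwork, ...);	free old data after a
 *						grace period
 *
 * Readers in tcindex_classify() therefore observe either the old or the
 * new parameter set, never a half-updated one.
 */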
494
495static int
496tcindex_change(struct net *net, struct sk_buff *in_skb,
497	       struct tcf_proto *tp, unsigned long base, u32 handle,
498	       struct nlattr **tca, void **arg, bool ovr,
499	       bool rtnl_held, struct netlink_ext_ack *extack)
500{
501	struct nlattr *opt = tca[TCA_OPTIONS];
502	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
503	struct tcindex_data *p = rtnl_dereference(tp->root);
504	struct tcindex_filter_result *r = *arg;
505	int err;
506
507	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
508	    "p %p,r %p,*arg %p\n",
509	    tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);
510
511	if (!opt)
512		return 0;
513
514	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
515					  tcindex_policy, NULL);
516	if (err < 0)
517		return err;
518
519	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
520				 tca[TCA_RATE], ovr, extack);
521}
522
523static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
524			 bool rtnl_held)
525{
526	struct tcindex_data *p = rtnl_dereference(tp->root);
527	struct tcindex_filter *f, *next;
528	int i;
529
530	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
531	if (p->perfect) {
532		for (i = 0; i < p->hash; i++) {
533			if (!p->perfect[i].res.class)
534				continue;
535			if (walker->count >= walker->skip) {
536				if (walker->fn(tp, p->perfect + i, walker) < 0) {
537					walker->stop = 1;
538					return;
539				}
540			}
541			walker->count++;
542		}
543	}
544	if (!p->h)
545		return;
546	for (i = 0; i < p->hash; i++) {
547		for (f = rtnl_dereference(p->h[i]); f; f = next) {
548			next = rtnl_dereference(f->next);
549			if (walker->count >= walker->skip) {
550				if (walker->fn(tp, &f->result, walker) < 0) {
551					walker->stop = 1;
552					return;
553				}
554			}
555			walker->count++;
556		}
557	}
558}
559
560static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
561			    struct netlink_ext_ack *extack)
562{
563	struct tcindex_data *p = rtnl_dereference(tp->root);
564	int i;
565
566	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
567
568	if (p->perfect) {
569		for (i = 0; i < p->hash; i++) {
570			struct tcindex_filter_result *r = p->perfect + i;
571
572			tcf_unbind_filter(tp, &r->res);
573			if (tcf_exts_get_net(&r->exts))
574				tcf_queue_work(&r->rwork,
575					       tcindex_destroy_rexts_work);
576			else
577				__tcindex_destroy_rexts(r);
578		}
579	}
580
581	for (i = 0; p->h && i < p->hash; i++) {
582		struct tcindex_filter *f, *next;
583		bool last;
584
585		for (f = rtnl_dereference(p->h[i]); f; f = next) {
586			next = rtnl_dereference(f->next);
587			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
588		}
589	}
590
591	tcf_queue_work(&p->rwork, tcindex_destroy_work);
592}
593
594
595static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
596			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
597{
598	struct tcindex_data *p = rtnl_dereference(tp->root);
599	struct tcindex_filter_result *r = fh;
600	struct nlattr *nest;
601
602	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
603		 tp, fh, skb, t, p, r);
604	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
605
606	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
607	if (nest == NULL)
608		goto nla_put_failure;
609
610	if (!fh) {
611		t->tcm_handle = ~0; /* whatever ... */
612		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
613		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
614		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
615		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
616			goto nla_put_failure;
617		nla_nest_end(skb, nest);
618	} else {
619		if (p->perfect) {
620			t->tcm_handle = r - p->perfect;
621		} else {
622			struct tcindex_filter *f;
623			struct tcindex_filter __rcu **fp;
624			int i;
625
626			t->tcm_handle = 0;
627			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
628				fp = &p->h[i];
629				for (f = rtnl_dereference(*fp);
630				     !t->tcm_handle && f;
631				     fp = &f->next, f = rtnl_dereference(*fp)) {
632					if (&f->result == r)
633						t->tcm_handle = f->key;
634				}
635			}
636		}
637		pr_debug("handle = %d\n", t->tcm_handle);
638		if (r->res.class &&
639		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
640			goto nla_put_failure;
641
642		if (tcf_exts_dump(skb, &r->exts) < 0)
643			goto nla_put_failure;
644		nla_nest_end(skb, nest);
645
646		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
647			goto nla_put_failure;
648	}
649
650	return skb->len;
651
652nla_put_failure:
653	nla_nest_cancel(skb, nest);
654	return -1;
655}
656
657static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl)
658{
659	struct tcindex_filter_result *r = fh;
660
661	if (r && r->res.classid == classid)
662		r->res.class = cl;
663}
664
665static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
666	.kind		=	"tcindex",
667	.classify	=	tcindex_classify,
668	.init		=	tcindex_init,
669	.destroy	=	tcindex_destroy,
670	.get		=	tcindex_get,
671	.change		=	tcindex_change,
672	.delete		=	tcindex_delete,
673	.walk		=	tcindex_walk,
674	.dump		=	tcindex_dump,
675	.bind_class	=	tcindex_bind_class,
676	.owner		=	THIS_MODULE,
677};
678
679static int __init init_tcindex(void)
680{
681	return register_tcf_proto_ops(&cls_tcindex_ops);
682}
683
684static void __exit exit_tcindex(void)
685{
686	unregister_tcf_proto_ops(&cls_tcindex_ops);
687}
688
689module_init(init_tcindex)
690module_exit(exit_tcindex)
691MODULE_LICENSE("GPL");
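/* Annotation, not part of the original file; device names and handles
 * are hypothetical and the syntax follows tc-tcindex(8). A typical
 * setup pairs tcindex with dsmark, which copies the DS byte into
 * skb->tc_index:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		tcindex mask 0xfc shift 2 pass_on
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *		handle 0x2e tcindex classid 1:1
 *
 * The second rule maps packets whose extracted key is 0x2e (DSCP 46,
 * the EF PHB) to class 1:1.
 */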
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
  4 *
  5 * Written 1998,1999 by Werner Almesberger, EPFL ICA
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/types.h>
 10#include <linux/kernel.h>
 11#include <linux/skbuff.h>
 12#include <linux/errno.h>
 13#include <linux/slab.h>
 14#include <linux/refcount.h>
 15#include <net/act_api.h>
 16#include <net/netlink.h>
 17#include <net/pkt_cls.h>
 18#include <net/sch_generic.h>
 19
 20/*
 21 * Passing parameters to the root seems to be done more awkwardly than really
 22 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 23 * verified. FIXME.
 24 */
 25
 26#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
 27#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
 28
 29
 30struct tcindex_data;
 31
 32struct tcindex_filter_result {
 33	struct tcf_exts		exts;
 34	struct tcf_result	res;
 35	struct tcindex_data	*p;
 36	struct rcu_work		rwork;
 37};
 38
 39struct tcindex_filter {
 40	u16 key;
 41	struct tcindex_filter_result result;
 42	struct tcindex_filter __rcu *next;
 43	struct rcu_work rwork;
 44};
 45
 46
 47struct tcindex_data {
 48	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
 49	struct tcindex_filter __rcu **h; /* imperfect hash; */
 50	struct tcf_proto *tp;
 51	u16 mask;		/* AND key with mask */
 52	u32 shift;		/* shift ANDed key to the right */
 53	u32 hash;		/* hash table size; 0 if undefined */
 54	u32 alloc_hash;		/* allocated size */
 55	u32 fall_through;	/* 0: only classify if explicit match */
 56	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
 57	struct rcu_work rwork;
 58};
 59
 60static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
 61{
 62	return tcf_exts_has_actions(&r->exts) || r->res.classid;
 63}
 64
 65static void tcindex_data_get(struct tcindex_data *p)
 66{
 67	refcount_inc(&p->refcnt);
 68}
 69
 70static void tcindex_data_put(struct tcindex_data *p)
 71{
 72	if (refcount_dec_and_test(&p->refcnt)) {
 73		kfree(p->perfect);
 74		kfree(p->h);
 75		kfree(p);
 76	}
 77}
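/* Annotation, not part of the original file: this get/put pair is the
 * core of the v5.9 changes relative to the v5.4 listing above. Freeing
 * of struct tcindex_data is now refcounted so that deferred per-result
 * work items, which may run after tcindex_destroy_work(), keep 'p'
 * alive through r->p until the last reference drops (see the comment in
 * tcindex_destroy() below).
 */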
 78
 79static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
 80						    u16 key)
 81{
 82	if (p->perfect) {
 83		struct tcindex_filter_result *f = p->perfect + key;
 84
 85		return tcindex_filter_is_set(f) ? f : NULL;
 86	} else if (p->h) {
 87		struct tcindex_filter __rcu **fp;
 88		struct tcindex_filter *f;
 89
 90		fp = &p->h[key % p->hash];
 91		for (f = rcu_dereference_bh_rtnl(*fp);
 92		     f;
 93		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
 94			if (f->key == key)
 95				return &f->result;
 96	}
 97
 98	return NULL;
 99}
100
101
102static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
103			    struct tcf_result *res)
104{
105	struct tcindex_data *p = rcu_dereference_bh(tp->root);
106	struct tcindex_filter_result *f;
107	int key = (skb->tc_index & p->mask) >> p->shift;
108
109	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
110		 skb, tp, res, p);
111
112	f = tcindex_lookup(p, key);
113	if (!f) {
114		struct Qdisc *q = tcf_block_q(tp->chain->block);
115
116		if (!p->fall_through)
117			return -1;
118		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
119		res->class = 0;
120		pr_debug("alg 0x%x\n", res->classid);
121		return 0;
122	}
123	*res = f->res;
124	pr_debug("map 0x%x\n", res->classid);
125
126	return tcf_exts_exec(skb, &f->exts, res);
127}
128
129
130static void *tcindex_get(struct tcf_proto *tp, u32 handle)
131{
132	struct tcindex_data *p = rtnl_dereference(tp->root);
133	struct tcindex_filter_result *r;
134
135	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
136	if (p->perfect && handle >= p->alloc_hash)
137		return NULL;
138	r = tcindex_lookup(p, handle);
139	return r && tcindex_filter_is_set(r) ? r : NULL;
140}
141
142static int tcindex_init(struct tcf_proto *tp)
143{
144	struct tcindex_data *p;
145
146	pr_debug("tcindex_init(tp %p)\n", tp);
147	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
148	if (!p)
149		return -ENOMEM;
150
151	p->mask = 0xffff;
152	p->hash = DEFAULT_HASH_SIZE;
153	p->fall_through = 1;
154	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
155
156	rcu_assign_pointer(tp->root, p);
157	return 0;
158}
159
160static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
161{
162	tcf_exts_destroy(&r->exts);
163	tcf_exts_put_net(&r->exts);
164	tcindex_data_put(r->p);
165}
166
167static void tcindex_destroy_rexts_work(struct work_struct *work)
168{
169	struct tcindex_filter_result *r;
170
171	r = container_of(to_rcu_work(work),
172			 struct tcindex_filter_result,
173			 rwork);
174	rtnl_lock();
175	__tcindex_destroy_rexts(r);
176	rtnl_unlock();
177}
178
179static void __tcindex_destroy_fexts(struct tcindex_filter *f)
180{
181	tcf_exts_destroy(&f->result.exts);
182	tcf_exts_put_net(&f->result.exts);
183	kfree(f);
184}
185
186static void tcindex_destroy_fexts_work(struct work_struct *work)
187{
188	struct tcindex_filter *f = container_of(to_rcu_work(work),
189						struct tcindex_filter,
190						rwork);
191
192	rtnl_lock();
193	__tcindex_destroy_fexts(f);
194	rtnl_unlock();
195}
196
197static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
198			  bool rtnl_held, struct netlink_ext_ack *extack)
199{
200	struct tcindex_data *p = rtnl_dereference(tp->root);
201	struct tcindex_filter_result *r = arg;
202	struct tcindex_filter __rcu **walk;
203	struct tcindex_filter *f = NULL;
204
205	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
206	if (p->perfect) {
207		if (!r->res.class)
208			return -ENOENT;
209	} else {
210		int i;
211
212		for (i = 0; i < p->hash; i++) {
213			walk = p->h + i;
214			for (f = rtnl_dereference(*walk); f;
215			     walk = &f->next, f = rtnl_dereference(*walk)) {
216				if (&f->result == r)
217					goto found;
218			}
219		}
220		return -ENOENT;
221
222found:
223		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
224	}
225	tcf_unbind_filter(tp, &r->res);
226	/* all classifiers are required to call tcf_exts_destroy() after rcu
227	 * grace period, since converted-to-rcu actions are relying on that
228	 * in cleanup() callback
229	 */
230	if (f) {
231		if (tcf_exts_get_net(&f->result.exts))
232			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
233		else
234			__tcindex_destroy_fexts(f);
235	} else {
236		tcindex_data_get(p);
237
238		if (tcf_exts_get_net(&r->exts))
239			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
240		else
241			__tcindex_destroy_rexts(r);
242	}
243
244	*last = false;
245	return 0;
246}
247
248static void tcindex_destroy_work(struct work_struct *work)
249{
250	struct tcindex_data *p = container_of(to_rcu_work(work),
251					      struct tcindex_data,
252					      rwork);
253
254	tcindex_data_put(p);
255}
256
257static inline int
258valid_perfect_hash(struct tcindex_data *p)
259{
260	return  p->hash > (p->mask >> p->shift);
261}
262
263static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
264	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
265	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
266	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
267	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
268	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
269};
270
271static int tcindex_filter_result_init(struct tcindex_filter_result *r,
272				      struct tcindex_data *p,
273				      struct net *net)
274{
275	memset(r, 0, sizeof(*r));
276	r->p = p;
277	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
278			     TCA_TCINDEX_POLICE);
279}
280
281static void tcindex_partial_destroy_work(struct work_struct *work)
282{
283	struct tcindex_data *p = container_of(to_rcu_work(work),
284					      struct tcindex_data,
285					      rwork);
286
287	rtnl_lock();
288	kfree(p->perfect);
289	kfree(p);
290	rtnl_unlock();
291}
292
293static void tcindex_free_perfect_hash(struct tcindex_data *cp)
294{
295	int i;
296
297	for (i = 0; i < cp->hash; i++)
298		tcf_exts_destroy(&cp->perfect[i].exts);
299	kfree(cp->perfect);
300}
301
302static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
303{
304	int i, err = 0;
305
306	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
307			      GFP_KERNEL);
308	if (!cp->perfect)
309		return -ENOMEM;
310
311	for (i = 0; i < cp->hash; i++) {
312		err = tcf_exts_init(&cp->perfect[i].exts, net,
313				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
314		if (err < 0)
315			goto errout;
316		cp->perfect[i].p = cp;
317	}
318
319	return 0;
320
321errout:
322	tcindex_free_perfect_hash(cp);
323	return err;
324}
325
326static int
327tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
328		  u32 handle, struct tcindex_data *p,
329		  struct tcindex_filter_result *r, struct nlattr **tb,
330		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
331{
332	struct tcindex_filter_result new_filter_result, *old_r = r;
333	struct tcindex_data *cp = NULL, *oldp;
334	struct tcindex_filter *f = NULL; /* make gcc behave */
335	struct tcf_result cr = {};
336	int err, balloc = 0;
337	struct tcf_exts e;
338
339	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
340	if (err < 0)
341		return err;
342	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
343	if (err < 0)
344		goto errout;
345
346	err = -ENOMEM;
347	/* tcindex_data attributes must look atomic to classifier/lookup so
348	 * allocate new tcindex data and RCU assign it onto root. Keeping
349	 * perfect hash and hash pointers from old data.
350	 */
351	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
352	if (!cp)
353		goto errout;
354
355	cp->mask = p->mask;
356	cp->shift = p->shift;
357	cp->hash = p->hash;
358	cp->alloc_hash = p->alloc_hash;
359	cp->fall_through = p->fall_through;
360	cp->tp = tp;
361	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
362
363	if (tb[TCA_TCINDEX_HASH])
364		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
365
366	if (tb[TCA_TCINDEX_MASK])
367		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
368
369	if (tb[TCA_TCINDEX_SHIFT])
370		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
371
372	if (!cp->hash) {
373		/* Hash not specified, use perfect hash if the upper limit
374		 * of the hashing index is below the threshold.
375		 */
376		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
377			cp->hash = (cp->mask >> cp->shift) + 1;
378		else
379			cp->hash = DEFAULT_HASH_SIZE;
380	}
381
382	if (p->perfect) {
383		int i;
384
385		if (tcindex_alloc_perfect_hash(net, cp) < 0)
386			goto errout;
387		cp->alloc_hash = cp->hash;
388		for (i = 0; i < min(cp->hash, p->hash); i++)
389			cp->perfect[i].res = p->perfect[i].res;
390		balloc = 1;
391	}
392	cp->h = p->h;
393
394	err = tcindex_filter_result_init(&new_filter_result, cp, net);
395	if (err < 0)
396		goto errout_alloc;
397	if (old_r)
398		cr = r->res;
399
400	err = -EBUSY;
401
402	/* Hash already allocated, make sure that we still meet the
403	 * requirements for the allocated hash.
404	 */
405	if (cp->perfect) {
406		if (!valid_perfect_hash(cp) ||
407		    cp->hash > cp->alloc_hash)
408			goto errout_alloc;
409	} else if (cp->h && cp->hash != cp->alloc_hash) {
410		goto errout_alloc;
411	}
412
413	err = -EINVAL;
414	if (tb[TCA_TCINDEX_FALL_THROUGH])
415		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
416
417	if (!cp->perfect && !cp->h)
418		cp->alloc_hash = cp->hash;
419
420	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
421	 * but then, we'd fail handles that may become valid after some future
422	 * mask change. While this is extremely unlikely to ever matter,
423	 * the check below is safer (and also more backwards-compatible).
424	 */
425	if (cp->perfect || valid_perfect_hash(cp))
426		if (handle >= cp->alloc_hash)
427			goto errout_alloc;
428
429
430	err = -ENOMEM;
431	if (!cp->perfect && !cp->h) {
432		if (valid_perfect_hash(cp)) {
433			if (tcindex_alloc_perfect_hash(net, cp) < 0)
434				goto errout_alloc;
435			balloc = 1;
436		} else {
437			struct tcindex_filter __rcu **hash;
438
439			hash = kcalloc(cp->hash,
440				       sizeof(struct tcindex_filter *),
441				       GFP_KERNEL);
442
443			if (!hash)
444				goto errout_alloc;
445
446			cp->h = hash;
447			balloc = 2;
448		}
449	}
450
451	if (cp->perfect)
452		r = cp->perfect + handle;
453	else
454		r = tcindex_lookup(cp, handle) ? : &new_filter_result;
455
456	if (r == &new_filter_result) {
457		f = kzalloc(sizeof(*f), GFP_KERNEL);
458		if (!f)
459			goto errout_alloc;
460		f->key = handle;
461		f->next = NULL;
462		err = tcindex_filter_result_init(&f->result, cp, net);
463		if (err < 0) {
464			kfree(f);
465			goto errout_alloc;
466		}
467	}
468
469	if (tb[TCA_TCINDEX_CLASSID]) {
470		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
471		tcf_bind_filter(tp, &cr, base);
472	}
473
474	if (old_r && old_r != r) {
475		err = tcindex_filter_result_init(old_r, cp, net);
476		if (err < 0) {
477			kfree(f);
478			goto errout_alloc;
479		}
480	}
481
482	oldp = p;
483	r->res = cr;
484	tcf_exts_change(&r->exts, &e);
485
486	rcu_assign_pointer(tp->root, cp);
487
488	if (r == &new_filter_result) {
489		struct tcindex_filter *nfp;
490		struct tcindex_filter __rcu **fp;
491
492		f->result.res = r->res;
493		tcf_exts_change(&f->result.exts, &r->exts);
494
495		fp = cp->h + (handle % cp->hash);
496		for (nfp = rtnl_dereference(*fp);
497		     nfp;
498		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
499				; /* nothing */
500
501		rcu_assign_pointer(*fp, f);
502	} else {
503		tcf_exts_destroy(&new_filter_result.exts);
504	}
505
506	if (oldp)
507		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
508	return 0;
509
510errout_alloc:
511	if (balloc == 1)
512		tcindex_free_perfect_hash(cp);
513	else if (balloc == 2)
514		kfree(cp->h);
515	tcf_exts_destroy(&new_filter_result.exts);
516errout:
517	kfree(cp);
518	tcf_exts_destroy(&e);
519	return err;
520}
521
522static int
523tcindex_change(struct net *net, struct sk_buff *in_skb,
524	       struct tcf_proto *tp, unsigned long base, u32 handle,
525	       struct nlattr **tca, void **arg, bool ovr,
526	       bool rtnl_held, struct netlink_ext_ack *extack)
527{
528	struct nlattr *opt = tca[TCA_OPTIONS];
529	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
530	struct tcindex_data *p = rtnl_dereference(tp->root);
531	struct tcindex_filter_result *r = *arg;
532	int err;
533
534	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
535	    "p %p,r %p,*arg %p\n",
536	    tp, handle, tca, arg, opt, p, r, *arg);
537
538	if (!opt)
539		return 0;
540
541	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
542					  tcindex_policy, NULL);
543	if (err < 0)
544		return err;
545
546	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
547				 tca[TCA_RATE], ovr, extack);
548}
549
550static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
551			 bool rtnl_held)
552{
553	struct tcindex_data *p = rtnl_dereference(tp->root);
554	struct tcindex_filter *f, *next;
555	int i;
556
557	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
558	if (p->perfect) {
559		for (i = 0; i < p->hash; i++) {
560			if (!p->perfect[i].res.class)
561				continue;
562			if (walker->count >= walker->skip) {
563				if (walker->fn(tp, p->perfect + i, walker) < 0) {
564					walker->stop = 1;
565					return;
566				}
567			}
568			walker->count++;
569		}
570	}
571	if (!p->h)
572		return;
573	for (i = 0; i < p->hash; i++) {
574		for (f = rtnl_dereference(p->h[i]); f; f = next) {
575			next = rtnl_dereference(f->next);
576			if (walker->count >= walker->skip) {
577				if (walker->fn(tp, &f->result, walker) < 0) {
578					walker->stop = 1;
579					return;
580				}
581			}
582			walker->count++;
583		}
584	}
585}
586
587static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
588			    struct netlink_ext_ack *extack)
589{
590	struct tcindex_data *p = rtnl_dereference(tp->root);
591	int i;
592
593	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
594
595	if (p->perfect) {
596		for (i = 0; i < p->hash; i++) {
597			struct tcindex_filter_result *r = p->perfect + i;
598
599			/* tcf_queue_work() does not guarantee the ordering we
600			 * want, so we have to take this refcnt temporarily to
601			 * ensure 'p' is freed after all tcindex_filter_result
602			 * here. Imperfect hash does not need this, because it
603			 * uses linked lists rather than an array.
604			 */
605			tcindex_data_get(p);
606
607			tcf_unbind_filter(tp, &r->res);
608			if (tcf_exts_get_net(&r->exts))
609				tcf_queue_work(&r->rwork,
610					       tcindex_destroy_rexts_work);
611			else
612				__tcindex_destroy_rexts(r);
613		}
614	}
615
616	for (i = 0; p->h && i < p->hash; i++) {
617		struct tcindex_filter *f, *next;
618		bool last;
619
620		for (f = rtnl_dereference(p->h[i]); f; f = next) {
621			next = rtnl_dereference(f->next);
622			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
623		}
624	}
625
626	tcf_queue_work(&p->rwork, tcindex_destroy_work);
627}
628
629
630static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
631			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
632{
633	struct tcindex_data *p = rtnl_dereference(tp->root);
634	struct tcindex_filter_result *r = fh;
635	struct nlattr *nest;
636
637	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
638		 tp, fh, skb, t, p, r);
639	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
640
641	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
642	if (nest == NULL)
643		goto nla_put_failure;
644
645	if (!fh) {
646		t->tcm_handle = ~0; /* whatever ... */
647		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
648		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
649		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
650		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
651			goto nla_put_failure;
652		nla_nest_end(skb, nest);
653	} else {
654		if (p->perfect) {
655			t->tcm_handle = r - p->perfect;
656		} else {
657			struct tcindex_filter *f;
658			struct tcindex_filter __rcu **fp;
659			int i;
660
661			t->tcm_handle = 0;
662			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
663				fp = &p->h[i];
664				for (f = rtnl_dereference(*fp);
665				     !t->tcm_handle && f;
666				     fp = &f->next, f = rtnl_dereference(*fp)) {
667					if (&f->result == r)
668						t->tcm_handle = f->key;
669				}
670			}
671		}
672		pr_debug("handle = %d\n", t->tcm_handle);
673		if (r->res.class &&
674		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
675			goto nla_put_failure;
676
677		if (tcf_exts_dump(skb, &r->exts) < 0)
678			goto nla_put_failure;
679		nla_nest_end(skb, nest);
680
681		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
682			goto nla_put_failure;
683	}
684
685	return skb->len;
686
687nla_put_failure:
688	nla_nest_cancel(skb, nest);
689	return -1;
690}
691
692static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
693			       void *q, unsigned long base)
694{
695	struct tcindex_filter_result *r = fh;
696
697	if (r && r->res.classid == classid) {
698		if (cl)
699			__tcf_bind_filter(q, &r->res, base);
700		else
701			__tcf_unbind_filter(q, &r->res);
702	}
703}
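/* Annotation, not part of the original file: compared with the v5.4
 * version above, bind_class now receives the qdisc and class base, so a
 * matching result is actively rebound via __tcf_bind_filter() (or
 * unbound when cl is zero) instead of having its cached class pointer
 * overwritten directly.
 */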
704
705static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
706	.kind		=	"tcindex",
707	.classify	=	tcindex_classify,
708	.init		=	tcindex_init,
709	.destroy	=	tcindex_destroy,
710	.get		=	tcindex_get,
711	.change		=	tcindex_change,
712	.delete		=	tcindex_delete,
713	.walk		=	tcindex_walk,
714	.dump		=	tcindex_dump,
715	.bind_class	=	tcindex_bind_class,
716	.owner		=	THIS_MODULE,
717};
718
719static int __init init_tcindex(void)
720{
721	return register_tcf_proto_ops(&cls_tcindex_ops);
722}
723
724static void __exit exit_tcindex(void)
725{
726	unregister_tcf_proto_ops(&cls_tcindex_ops);
727}
728
729module_init(init_tcindex)
730module_exit(exit_tcindex)
731MODULE_LICENSE("GPL");