v4.6
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */
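
/*
 * Rough sketch of the handle layout, as derived from route4_set_parms()
 * and the *_hash() helpers below:
 *   bits  0-7   the "to" route tag (valid when bit 15 is clear)
 *   bits  8-14  may carry extra user-supplied handle bits
 *               (see the handle & 0x7F00 merge in route4_set_parms())
 *   bit  15     set when no "to" tag was specified ("to ANY")
 *   bits 16-31  the "from" tag, or the incoming interface with bit 31
 *               set, or 0xFFFF when matching "from ANY"
 */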

struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

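/*
 * Head of the classifier: fastmap[] caches recent lookup results;
 * table[0..255] indexes "to" tags directly, table[256] holds the
 * "to ANY" bucket.
 */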
struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
};

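/* Sentinel stored in the fastmap to cache a failed lookup. */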
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

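/*
 * Apply the matched filter: copy its result into *res and run any
 * attached actions; when there are none, a successful match is cached
 * in the fastmap (unless dont_cache is set).  If an action returns an
 * error, the lookup is marked uncacheable and the scan continues.
 */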
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

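/*
 * Classification: first consult the fastmap under fastmap_lock; on a
 * miss, walk the RCU-protected hash table, first for the packet's
 * destination realm and then once more against the "to ANY" bucket
 * (h = 256).
 */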
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

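/*
 * to_hash()/from_hash() map a filter handle onto the table[] and ht[]
 * bucket indices, mirroring the handle layout sketched above.
 */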
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void
route4_delete_filter(struct rcu_head *head)
{
	struct route4_filter *f = container_of(head, struct route4_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static bool route4_destroy(struct tcf_proto *tp, bool force)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return true;

	if (!force) {
		for (h1 = 0; h1 <= 256; h1++) {
			if (rcu_access_pointer(head->table[h1]))
				return false;
		}
	}

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					call_rcu(&f->rcu, route4_delete_filter);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

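/*
 * Unlink a filter under RTNL, flush the fastmap (which may still
 * reference it) and free it after a grace period; drop the bucket
 * as well once its last filter is gone.
 */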
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = (struct route4_filter *)arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might reference
			 * the filter; since it is unlinked it cannot get
			 * back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			call_rcu(&f->rcu, route4_delete_filter);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					return 0;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);

			return 0;
		}
	}
	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				goto errout;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

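/*
 * Create or update a filter under RTNL.  An update allocates a new
 * filter, copies the old state, applies the new parameters and links
 * the result in handle order; the old filter, if any, is then freed
 * via call_rcu() and the fastmap is flushed.
 */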
static int route4_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	fold = (struct route4_filter *)*arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	netif_keep_dst(qdisc_dev(tp->q));
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = (unsigned long)f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, route4_delete_filter);
	}
	return 0;

errout:
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
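
The older v3.1 version of the same file follows for comparison. It predates the RCU conversion visible above: filter chains are plain pointers protected by the qdisc tree lock (tcf_tree_lock), the fastmap is reset under the qdisc root lock rather than a dedicated spinlock, and the extension attributes are described by a tcf_ext_map table.
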
v3.1
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */

struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256 + 1];
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16 + 16 + 1];
};

struct route4_filter {
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

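/* Maps this classifier's action and police netlink attributes for the
 * generic tcf_exts helpers used in this API generation.
 */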
static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static void
route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable *)dst)->rt_iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	b = head->table[h];
	if (b) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = head->table[h1];
	if (b) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = head->table[h1];
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned int h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = head->table[h1];
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	f = (struct route4_filter *)*arg;
	if (f) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		b = head->table[th];
		if (b) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id >> 16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");