v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 */
  7
  8/*
  9   Compared to the general packet classification problem,
 10   RSVP needs only several relatively simple rules:
 11
 12   * (dst, protocol) are always specified,
 13     so that we are able to hash them.
 14   * src may be exact, or may be wildcard, so that
 15     we can keep a hash table plus one wildcard entry.
 16   * source port (or flow label) is important only if src is given.
 17
 18   IMPLEMENTATION.
 19
 20   We use a two level hash table: The top level is keyed by
 21   destination address and protocol ID, every bucket contains a list
 22   of "rsvp sessions", identified by destination address, protocol and
 23   DPI(="Destination Port ID"): triple (key, mask, offset).
 24
 25   Every bucket has a smaller hash table keyed by source address
 26   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
 27   Every bucket is again a list of "RSVP flows", selected by
 28   source address and SPI(="Source Port ID" here rather than
 29   "security parameter index"): triple (key, mask, offset).
 30
 31
 32   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
 33   and all fragmented packets go to the best-effort traffic class.
 34
 35
 36   NOTE 2. Two "port id"s seem to be redundant; RFC 2207 requires
 37   only one "Generalized Port Identifier". So for classic
 38   AH, ESP (and UDP, TCP) both *pi should coincide, or one of them
 39   should be a wildcard.
 40
 41   At first sight, this redundancy is just a waste of CPU
 42   resources. But DPI and SPI add the possibility to assign different
 43   priorities to GPIs. Look also at note 4 about tunnels below.
 44
 45
 46   NOTE 3. One complication is the case of tunneled packets.
 47   We implement it as follows: if the first lookup
 48   matches a special session with "tunnelhdr" value not zero,
 49   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
 50   In this case, we pull tunnelhdr bytes and restart lookup
 51   with tunnel ID added to the list of keys. Simple and stupid 8)8)
 52   It's enough for PIMREG and IPIP.
 53
 54
 55   NOTE 4. Two GPIs make it possible to parse even GRE packets.
 56   F.e. DPI can select ETH_P_IP (and necessary flags to make
 57   tunnelhdr correct) in GRE protocol field and SPI matches
 58   GRE key. Is it not nice? 8)8)
 59
 60
 61   Well, as a result, despite its simplicity, we get a pretty
 62   powerful classification engine.  */
 63
 64
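The (key, mask, offset) triples mentioned above are the generalized port identifiers (GPIs) carried by both the session (DPI) and the flow (SPI). As a rough, self-contained illustration of how such a triple is matched against the transport header, here is a minimal userspace sketch (plain C, not kernel code; the port numbers are made up for the example):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	/* A generalized port identifier: match a masked 32-bit word located
	 * "offset" bytes into the transport header. */
	struct gpi {
		uint32_t key;	/* expected value, network byte order */
		uint32_t mask;	/* which bits of the word are significant */
		int	 offset;/* byte offset into the transport header */
	};

	/* Same test the classifier uses: the masked XOR is zero on a match. */
	static int gpi_match(const uint8_t *xprt, const struct gpi *g)
	{
		uint32_t w;

		memcpy(&w, xprt + g->offset, sizeof(w));
		return (g->mask & (w ^ g->key)) == 0;
	}

	int main(void)
	{
		/* First 4 bytes of a UDP header: source port 4000, dest port 5004. */
		uint8_t udp[8] = { 0x0f, 0xa0, 0x13, 0x8c, 0, 0, 0, 0 };
		/* DPI matching destination port 5004, ignoring the source port. */
		struct gpi dpi = { htonl(5004), htonl(0x0000FFFF), 0 };

		printf("match: %d\n", gpi_match(udp, &dpi));	/* prints 1 */
		return 0;
	}

rsvp_classify() below performs exactly this masked-XOR test, only with __be32 fields and against the packet's transport header (xprt).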
 65struct rsvp_head {
 66	u32			tmap[256/32];
 67	u32			hgenerator;
 68	u8			tgenerator;
 69	struct rsvp_session __rcu *ht[256];
 70	struct rcu_head		rcu;
 71};
 72
 73struct rsvp_session {
 74	struct rsvp_session __rcu	*next;
 75	__be32				dst[RSVP_DST_LEN];
 76	struct tc_rsvp_gpi		dpi;
 77	u8				protocol;
 78	u8				tunnelid;
 79	/* 16 (src,sport) hash slots, and one wildcard source slot */
 80	struct rsvp_filter __rcu	*ht[16 + 1];
 81	struct rcu_head			rcu;
 82};
 83
 84
 85struct rsvp_filter {
 86	struct rsvp_filter __rcu	*next;
 87	__be32				src[RSVP_DST_LEN];
 88	struct tc_rsvp_gpi		spi;
 89	u8				tunnelhdr;
 90
 91	struct tcf_result		res;
 92	struct tcf_exts			exts;
 93
 94	u32				handle;
 95	struct rsvp_session		*sess;
 96	struct rcu_work			rwork;
 97};
 98
 99static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
100{
101	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
102
103	h ^= h>>16;
104	h ^= h>>8;
105	return (h ^ protocol ^ tunnelid) & 0xFF;
106}
107
108static inline unsigned int hash_src(__be32 *src)
109{
110	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
111
112	h ^= h>>16;
113	h ^= h>>8;
114	h ^= h>>4;
115	return h & 0xF;
116}
117
118#define RSVP_APPLY_RESULT()				\
119{							\
120	int r = tcf_exts_exec(skb, &f->exts, res);	\
121	if (r < 0)					\
122		continue;				\
123	else if (r > 0)					\
124		return r;				\
125}
126
127static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
128			 struct tcf_result *res)
129{
130	struct rsvp_head *head = rcu_dereference_bh(tp->root);
131	struct rsvp_session *s;
132	struct rsvp_filter *f;
133	unsigned int h1, h2;
134	__be32 *dst, *src;
135	u8 protocol;
136	u8 tunnelid = 0;
137	u8 *xprt;
138#if RSVP_DST_LEN == 4
139	struct ipv6hdr *nhptr;
140
141	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
142		return -1;
143	nhptr = ipv6_hdr(skb);
144#else
145	struct iphdr *nhptr;
146
147	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
148		return -1;
149	nhptr = ip_hdr(skb);
150#endif
151restart:
152
153#if RSVP_DST_LEN == 4
154	src = &nhptr->saddr.s6_addr32[0];
155	dst = &nhptr->daddr.s6_addr32[0];
156	protocol = nhptr->nexthdr;
157	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
158#else
159	src = &nhptr->saddr;
160	dst = &nhptr->daddr;
161	protocol = nhptr->protocol;
162	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
163	if (ip_is_fragment(nhptr))
164		return -1;
165#endif
166
167	h1 = hash_dst(dst, protocol, tunnelid);
168	h2 = hash_src(src);
169
170	for (s = rcu_dereference_bh(head->ht[h1]); s;
171	     s = rcu_dereference_bh(s->next)) {
172		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
173		    protocol == s->protocol &&
174		    !(s->dpi.mask &
175		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
176#if RSVP_DST_LEN == 4
177		    dst[0] == s->dst[0] &&
178		    dst[1] == s->dst[1] &&
179		    dst[2] == s->dst[2] &&
180#endif
181		    tunnelid == s->tunnelid) {
182
183			for (f = rcu_dereference_bh(s->ht[h2]); f;
184			     f = rcu_dereference_bh(f->next)) {
185				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
186				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
187#if RSVP_DST_LEN == 4
188				    &&
189				    src[0] == f->src[0] &&
190				    src[1] == f->src[1] &&
191				    src[2] == f->src[2]
192#endif
193				    ) {
194					*res = f->res;
195					RSVP_APPLY_RESULT();
196
197matched:
198					if (f->tunnelhdr == 0)
199						return 0;
200
201					tunnelid = f->res.classid;
202					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
203					goto restart;
204				}
205			}
206
207			/* And wildcard bucket... */
208			for (f = rcu_dereference_bh(s->ht[16]); f;
209			     f = rcu_dereference_bh(f->next)) {
210				*res = f->res;
211				RSVP_APPLY_RESULT();
212				goto matched;
213			}
214			return -1;
215		}
216	}
217	return -1;
218}
219
220static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
221{
222	struct rsvp_head *head = rtnl_dereference(tp->root);
223	struct rsvp_session *s;
224	struct rsvp_filter __rcu **ins;
225	struct rsvp_filter *pins;
226	unsigned int h1 = h & 0xFF;
227	unsigned int h2 = (h >> 8) & 0xFF;
228
229	for (s = rtnl_dereference(head->ht[h1]); s;
230	     s = rtnl_dereference(s->next)) {
231		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
232		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
233			if (pins->handle == h) {
234				RCU_INIT_POINTER(n->next, pins->next);
235				rcu_assign_pointer(*ins, n);
236				return;
237			}
238		}
239	}
240
 241	/* Something went wrong if we are trying to replace a non-existent
 242	 * node. Might as well halt instead of silently failing.
243	 */
244	BUG_ON(1);
245}
246
247static void *rsvp_get(struct tcf_proto *tp, u32 handle)
248{
249	struct rsvp_head *head = rtnl_dereference(tp->root);
250	struct rsvp_session *s;
251	struct rsvp_filter *f;
252	unsigned int h1 = handle & 0xFF;
253	unsigned int h2 = (handle >> 8) & 0xFF;
254
255	if (h2 > 16)
256		return NULL;
257
258	for (s = rtnl_dereference(head->ht[h1]); s;
259	     s = rtnl_dereference(s->next)) {
260		for (f = rtnl_dereference(s->ht[h2]); f;
261		     f = rtnl_dereference(f->next)) {
262			if (f->handle == handle)
263				return f;
264		}
265	}
266	return NULL;
267}
268
269static int rsvp_init(struct tcf_proto *tp)
270{
271	struct rsvp_head *data;
272
273	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
274	if (data) {
275		rcu_assign_pointer(tp->root, data);
276		return 0;
277	}
278	return -ENOBUFS;
279}
280
281static void __rsvp_delete_filter(struct rsvp_filter *f)
282{
283	tcf_exts_destroy(&f->exts);
284	tcf_exts_put_net(&f->exts);
285	kfree(f);
286}
287
288static void rsvp_delete_filter_work(struct work_struct *work)
289{
290	struct rsvp_filter *f = container_of(to_rcu_work(work),
291					     struct rsvp_filter,
292					     rwork);
293	rtnl_lock();
294	__rsvp_delete_filter(f);
295	rtnl_unlock();
296}
297
298static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
299{
300	tcf_unbind_filter(tp, &f->res);
301	/* all classifiers are required to call tcf_exts_destroy() after rcu
302	 * grace period, since converted-to-rcu actions are relying on that
303	 * in cleanup() callback
304	 */
305	if (tcf_exts_get_net(&f->exts))
306		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
307	else
308		__rsvp_delete_filter(f);
309}
310
311static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
312			 struct netlink_ext_ack *extack)
313{
314	struct rsvp_head *data = rtnl_dereference(tp->root);
315	int h1, h2;
316
317	if (data == NULL)
318		return;
319
320	for (h1 = 0; h1 < 256; h1++) {
321		struct rsvp_session *s;
322
323		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
324			RCU_INIT_POINTER(data->ht[h1], s->next);
325
326			for (h2 = 0; h2 <= 16; h2++) {
327				struct rsvp_filter *f;
328
329				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
330					rcu_assign_pointer(s->ht[h2], f->next);
331					rsvp_delete_filter(tp, f);
332				}
333			}
334			kfree_rcu(s, rcu);
335		}
336	}
337	kfree_rcu(data, rcu);
338}
339
340static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
341		       bool rtnl_held, struct netlink_ext_ack *extack)
342{
343	struct rsvp_head *head = rtnl_dereference(tp->root);
344	struct rsvp_filter *nfp, *f = arg;
345	struct rsvp_filter __rcu **fp;
346	unsigned int h = f->handle;
347	struct rsvp_session __rcu **sp;
348	struct rsvp_session *nsp, *s = f->sess;
349	int i, h1;
350
351	fp = &s->ht[(h >> 8) & 0xFF];
352	for (nfp = rtnl_dereference(*fp); nfp;
353	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
354		if (nfp == f) {
355			RCU_INIT_POINTER(*fp, f->next);
356			rsvp_delete_filter(tp, f);
357
358			/* Strip tree */
359
360			for (i = 0; i <= 16; i++)
361				if (s->ht[i])
362					goto out;
363
364			/* OK, session has no flows */
365			sp = &head->ht[h & 0xFF];
366			for (nsp = rtnl_dereference(*sp); nsp;
367			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
368				if (nsp == s) {
369					RCU_INIT_POINTER(*sp, s->next);
370					kfree_rcu(s, rcu);
371					goto out;
372				}
373			}
374
375			break;
376		}
377	}
378
379out:
380	*last = true;
381	for (h1 = 0; h1 < 256; h1++) {
382		if (rcu_access_pointer(head->ht[h1])) {
383			*last = false;
384			break;
385		}
386	}
387
388	return 0;
389}
390
391static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
392{
393	struct rsvp_head *data = rtnl_dereference(tp->root);
394	int i = 0xFFFF;
395
396	while (i-- > 0) {
397		u32 h;
398
399		if ((data->hgenerator += 0x10000) == 0)
400			data->hgenerator = 0x10000;
401		h = data->hgenerator|salt;
402		if (!rsvp_get(tp, h))
403			return h;
404	}
405	return 0;
406}
407
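gen_handle() packs the two bucket indices into the low 16 bits of a filter handle and a per-head generator into the upper 16 bits; rsvp_get() above decodes the same layout. A small userspace sketch of that layout (the helper names are mine, not kernel API):

	#include <stdint.h>
	#include <assert.h>

	/* Handle layout used by gen_handle()/rsvp_get():
	 *   bits  0..7  : destination hash bucket (h1)
	 *   bits  8..15 : source hash slot (h2, 16 == wildcard)
	 *   bits 16..31 : per-head generator, always non-zero
	 */
	static uint32_t rsvp_handle_pack(uint32_t gen, uint32_t h1, uint32_t h2)
	{
		return (gen << 16) | (h2 << 8) | h1;
	}

	static uint32_t rsvp_handle_h1(uint32_t handle) { return handle & 0xFF; }
	static uint32_t rsvp_handle_h2(uint32_t handle) { return (handle >> 8) & 0xFF; }

	int main(void)
	{
		uint32_t h = rsvp_handle_pack(1, 0x2a, 16);	/* wildcard source slot */

		assert(rsvp_handle_h1(h) == 0x2a);
		assert(rsvp_handle_h2(h) == 16);
		return 0;
	}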
408static int tunnel_bts(struct rsvp_head *data)
409{
410	int n = data->tgenerator >> 5;
411	u32 b = 1 << (data->tgenerator & 0x1F);
412
413	if (data->tmap[n] & b)
414		return 0;
415	data->tmap[n] |= b;
416	return 1;
417}
418
419static void tunnel_recycle(struct rsvp_head *data)
420{
421	struct rsvp_session __rcu **sht = data->ht;
422	u32 tmap[256/32];
423	int h1, h2;
424
425	memset(tmap, 0, sizeof(tmap));
426
427	for (h1 = 0; h1 < 256; h1++) {
428		struct rsvp_session *s;
429		for (s = rtnl_dereference(sht[h1]); s;
430		     s = rtnl_dereference(s->next)) {
431			for (h2 = 0; h2 <= 16; h2++) {
432				struct rsvp_filter *f;
433
434				for (f = rtnl_dereference(s->ht[h2]); f;
435				     f = rtnl_dereference(f->next)) {
436					if (f->tunnelhdr == 0)
437						continue;
438					data->tgenerator = f->res.classid;
439					tunnel_bts(data);
440				}
441			}
442		}
443	}
444
445	memcpy(data->tmap, tmap, sizeof(tmap));
446}
447
448static u32 gen_tunnel(struct rsvp_head *data)
449{
450	int i, k;
451
452	for (k = 0; k < 2; k++) {
453		for (i = 255; i > 0; i--) {
454			if (++data->tgenerator == 0)
455				data->tgenerator = 1;
456			if (tunnel_bts(data))
457				return data->tgenerator;
458		}
459		tunnel_recycle(data);
460	}
461	return 0;
462}
463
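tunnel_bts() and gen_tunnel() form a tiny allocator for the 255 usable tunnel IDs: a 256-bit bitmap plus a rotating cursor, with one recycle pass that rebuilds the bitmap from the filters still installed before giving up. A standalone sketch of the underlying bitmap test-and-set (userspace C, for illustration only):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tmap[256 / 32];	/* one bit per possible tunnel ID */

	/* Test-and-set the bit for "id"; returns 1 if it was free. */
	static int tunnel_bts(uint8_t id)
	{
		uint32_t b = 1u << (id & 0x1F);
		int n = id >> 5;

		if (tmap[n] & b)
			return 0;
		tmap[n] |= b;
		return 1;
	}

	int main(void)
	{
		/* IDs 1..255 are usable; 0 means "no tunnel". */
		int first = tunnel_bts(7);	/* free -> 1 */
		int again = tunnel_bts(7);	/* already taken -> 0 */

		printf("%d %d\n", first, again);	/* prints "1 0" */
		return 0;
	}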
464static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
465	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
466	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
467				    .len = RSVP_DST_LEN * sizeof(u32) },
468	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
469				    .len = RSVP_DST_LEN * sizeof(u32) },
470	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
471};
472
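The TCA_RSVP_PINFO attribute carries a struct tc_rsvp_pinfo (declared in include/uapi/linux/pkt_cls.h) holding both GPIs plus the protocol and tunnel fields parsed by rsvp_change() below. As a hedged sketch of how a userspace program might fill it for a plain UDP session keyed on destination port, using local mirror structs so the example stays self-contained (field layout assumed from the uapi header; not a complete netlink request):

	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl() */
	#include <netinet/in.h>	/* IPPROTO_UDP */

	/* Local mirrors of struct tc_rsvp_gpi / tc_rsvp_pinfo; see
	 * include/uapi/linux/pkt_cls.h for the authoritative definitions. */
	struct rsvp_gpi {
		uint32_t key;
		uint32_t mask;
		int	 offset;
	};

	struct rsvp_pinfo {
		struct rsvp_gpi dpi;
		struct rsvp_gpi spi;
		uint8_t protocol;
		uint8_t tunnelid;
		uint8_t tunnelhdr;
		uint8_t pad;
	};

	/* "UDP, destination port dport, any source": the DPI matches the low
	 * 16 bits of the first transport word (the destination port), the SPI
	 * is left fully wildcarded, and no tunnel decapsulation is requested. */
	static void fill_udp_dport(struct rsvp_pinfo *p, uint16_t dport)
	{
		p->dpi.key = htonl(dport);
		p->dpi.mask = htonl(0x0000FFFF);
		p->dpi.offset = 0;
		p->spi.key = 0;
		p->spi.mask = 0;	/* mask 0: matches everything */
		p->spi.offset = 0;
		p->protocol = IPPROTO_UDP;
		p->tunnelid = 0;
		p->tunnelhdr = 0;
		p->pad = 0;
	}

	int main(void)
	{
		struct rsvp_pinfo p;

		fill_udp_dport(&p, 5004);
		return 0;
	}

A real configuration would place this structure in a TCA_RSVP_PINFO netlink attribute alongside TCA_RSVP_DST, which is roughly what tc's rsvp filter support does.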
473static int rsvp_change(struct net *net, struct sk_buff *in_skb,
474		       struct tcf_proto *tp, unsigned long base,
475		       u32 handle,
476		       struct nlattr **tca,
477		       void **arg, bool ovr, bool rtnl_held,
478		       struct netlink_ext_ack *extack)
479{
480	struct rsvp_head *data = rtnl_dereference(tp->root);
481	struct rsvp_filter *f, *nfp;
482	struct rsvp_filter __rcu **fp;
483	struct rsvp_session *nsp, *s;
484	struct rsvp_session __rcu **sp;
485	struct tc_rsvp_pinfo *pinfo = NULL;
486	struct nlattr *opt = tca[TCA_OPTIONS];
487	struct nlattr *tb[TCA_RSVP_MAX + 1];
488	struct tcf_exts e;
489	unsigned int h1, h2;
490	__be32 *dst;
491	int err;
492
493	if (opt == NULL)
494		return handle ? -EINVAL : 0;
495
496	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
497					  NULL);
498	if (err < 0)
499		return err;
500
501	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
502	if (err < 0)
503		return err;
504	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
505				extack);
506	if (err < 0)
507		goto errout2;
508
509	f = *arg;
510	if (f) {
511		/* Node exists: adjust only classid */
512		struct rsvp_filter *n;
513
514		if (f->handle != handle && handle)
515			goto errout2;
516
517		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
518		if (!n) {
519			err = -ENOMEM;
520			goto errout2;
521		}
522
523		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
524				    TCA_RSVP_POLICE);
525		if (err < 0) {
526			kfree(n);
527			goto errout2;
528		}
529
530		if (tb[TCA_RSVP_CLASSID]) {
531			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
532			tcf_bind_filter(tp, &n->res, base);
533		}
534
535		tcf_exts_change(&n->exts, &e);
536		rsvp_replace(tp, n, handle);
537		return 0;
538	}
539
540	/* Now more serious part... */
541	err = -EINVAL;
542	if (handle)
543		goto errout2;
544	if (tb[TCA_RSVP_DST] == NULL)
545		goto errout2;
546
547	err = -ENOBUFS;
548	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
549	if (f == NULL)
550		goto errout2;
551
552	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
553	if (err < 0)
554		goto errout;
555	h2 = 16;
556	if (tb[TCA_RSVP_SRC]) {
557		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
558		h2 = hash_src(f->src);
559	}
560	if (tb[TCA_RSVP_PINFO]) {
561		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
562		f->spi = pinfo->spi;
563		f->tunnelhdr = pinfo->tunnelhdr;
564	}
565	if (tb[TCA_RSVP_CLASSID])
566		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
567
568	dst = nla_data(tb[TCA_RSVP_DST]);
569	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
570
571	err = -ENOMEM;
572	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
573		goto errout;
574
575	if (f->tunnelhdr) {
576		err = -EINVAL;
577		if (f->res.classid > 255)
578			goto errout;
579
580		err = -ENOMEM;
581		if (f->res.classid == 0 &&
582		    (f->res.classid = gen_tunnel(data)) == 0)
583			goto errout;
584	}
585
586	for (sp = &data->ht[h1];
587	     (s = rtnl_dereference(*sp)) != NULL;
588	     sp = &s->next) {
589		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
590		    pinfo && pinfo->protocol == s->protocol &&
591		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
592#if RSVP_DST_LEN == 4
593		    dst[0] == s->dst[0] &&
594		    dst[1] == s->dst[1] &&
595		    dst[2] == s->dst[2] &&
596#endif
597		    pinfo->tunnelid == s->tunnelid) {
598
599insert:
600			/* OK, we found appropriate session */
601
602			fp = &s->ht[h2];
603
604			f->sess = s;
605			if (f->tunnelhdr == 0)
606				tcf_bind_filter(tp, &f->res, base);
607
608			tcf_exts_change(&f->exts, &e);
609
610			fp = &s->ht[h2];
611			for (nfp = rtnl_dereference(*fp); nfp;
612			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
613				__u32 mask = nfp->spi.mask & f->spi.mask;
614
615				if (mask != f->spi.mask)
616					break;
617			}
618			RCU_INIT_POINTER(f->next, nfp);
619			rcu_assign_pointer(*fp, f);
620
621			*arg = f;
622			return 0;
623		}
624	}
625
626	/* No session found. Create new one. */
627
628	err = -ENOBUFS;
629	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
630	if (s == NULL)
631		goto errout;
632	memcpy(s->dst, dst, sizeof(s->dst));
633
634	if (pinfo) {
635		s->dpi = pinfo->dpi;
636		s->protocol = pinfo->protocol;
637		s->tunnelid = pinfo->tunnelid;
638	}
639	sp = &data->ht[h1];
640	for (nsp = rtnl_dereference(*sp); nsp;
641	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
642		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
643			break;
644	}
645	RCU_INIT_POINTER(s->next, nsp);
646	rcu_assign_pointer(*sp, s);
647
648	goto insert;
649
650errout:
651	tcf_exts_destroy(&f->exts);
652	kfree(f);
653errout2:
654	tcf_exts_destroy(&e);
655	return err;
656}
657
658static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
659		      bool rtnl_held)
660{
661	struct rsvp_head *head = rtnl_dereference(tp->root);
662	unsigned int h, h1;
663
664	if (arg->stop)
665		return;
666
667	for (h = 0; h < 256; h++) {
668		struct rsvp_session *s;
669
670		for (s = rtnl_dereference(head->ht[h]); s;
671		     s = rtnl_dereference(s->next)) {
672			for (h1 = 0; h1 <= 16; h1++) {
673				struct rsvp_filter *f;
674
675				for (f = rtnl_dereference(s->ht[h1]); f;
676				     f = rtnl_dereference(f->next)) {
677					if (arg->count < arg->skip) {
678						arg->count++;
679						continue;
680					}
681					if (arg->fn(tp, f, arg) < 0) {
682						arg->stop = 1;
683						return;
684					}
685					arg->count++;
686				}
687			}
688		}
689	}
690}
691
692static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
693		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
694{
695	struct rsvp_filter *f = fh;
696	struct rsvp_session *s;
697	struct nlattr *nest;
698	struct tc_rsvp_pinfo pinfo;
699
700	if (f == NULL)
701		return skb->len;
702	s = f->sess;
703
704	t->tcm_handle = f->handle;
705
706	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
707	if (nest == NULL)
708		goto nla_put_failure;
709
710	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
711		goto nla_put_failure;
712	pinfo.dpi = s->dpi;
713	pinfo.spi = f->spi;
714	pinfo.protocol = s->protocol;
715	pinfo.tunnelid = s->tunnelid;
716	pinfo.tunnelhdr = f->tunnelhdr;
717	pinfo.pad = 0;
718	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
719		goto nla_put_failure;
720	if (f->res.classid &&
721	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
722		goto nla_put_failure;
723	if (((f->handle >> 8) & 0xFF) != 16 &&
724	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
725		goto nla_put_failure;
726
727	if (tcf_exts_dump(skb, &f->exts) < 0)
728		goto nla_put_failure;
729
730	nla_nest_end(skb, nest);
731
732	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
733		goto nla_put_failure;
734	return skb->len;
735
736nla_put_failure:
737	nla_nest_cancel(skb, nest);
738	return -1;
739}
740
741static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl)
742{
743	struct rsvp_filter *f = fh;
744
745	if (f && f->res.classid == classid)
746		f->res.class = cl;
747}
748
749static struct tcf_proto_ops RSVP_OPS __read_mostly = {
750	.kind		=	RSVP_ID,
751	.classify	=	rsvp_classify,
752	.init		=	rsvp_init,
753	.destroy	=	rsvp_destroy,
754	.get		=	rsvp_get,
755	.change		=	rsvp_change,
756	.delete		=	rsvp_delete,
757	.walk		=	rsvp_walk,
758	.dump		=	rsvp_dump,
759	.bind_class	=	rsvp_bind_class,
760	.owner		=	THIS_MODULE,
761};
762
763static int __init init_rsvp(void)
764{
765	return register_tcf_proto_ops(&RSVP_OPS);
766}
767
768static void __exit exit_rsvp(void)
769{
770	unregister_tcf_proto_ops(&RSVP_OPS);
771}
772
773module_init(init_rsvp)
774module_exit(exit_rsvp)
v3.15
 
  1/*
  2 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 */
 11
 12/*
 13   Compared to the general packet classification problem,
 14   RSVP needs only several relatively simple rules:
 15
 16   * (dst, protocol) are always specified,
 17     so that we are able to hash them.
 18   * src may be exact, or may be wildcard, so that
 19     we can keep a hash table plus one wildcard entry.
 20   * source port (or flow label) is important only if src is given.
 21
 22   IMPLEMENTATION.
 23
 24   We use a two level hash table: The top level is keyed by
 25   destination address and protocol ID, every bucket contains a list
 26   of "rsvp sessions", identified by destination address, protocol and
 27   DPI(="Destination Port ID"): triple (key, mask, offset).
 28
 29   Every bucket has a smaller hash table keyed by source address
 30   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
 31   Every bucket is again a list of "RSVP flows", selected by
 32   source address and SPI(="Source Port ID" here rather than
 33   "security parameter index"): triple (key, mask, offset).
 34
 35
 36   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
 37   and all fragmented packets go to the best-effort traffic class.
 38
 39
 40   NOTE 2. Two "port id"s seem to be redundant; RFC 2207 requires
 41   only one "Generalized Port Identifier". So for classic
 42   AH, ESP (and UDP, TCP) both *pi should coincide, or one of them
 43   should be a wildcard.
 44
 45   At first sight, this redundancy is just a waste of CPU
 46   resources. But DPI and SPI add the possibility to assign different
 47   priorities to GPIs. Look also at note 4 about tunnels below.
 48
 49
 50   NOTE 3. One complication is the case of tunneled packets.
 51   We implement it as follows: if the first lookup
 52   matches a special session with "tunnelhdr" value not zero,
 53   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
 54   In this case, we pull tunnelhdr bytes and restart lookup
 55   with tunnel ID added to the list of keys. Simple and stupid 8)8)
 56   It's enough for PIMREG and IPIP.
 57
 58
 59   NOTE 4. Two GPIs make it possible to parse even GRE packets.
 60   F.e. DPI can select ETH_P_IP (and necessary flags to make
 61   tunnelhdr correct) in GRE protocol field and SPI matches
 62   GRE key. Is it not nice? 8)8)
 63
 64
 65   Well, as a result, despite its simplicity, we get a pretty
 66   powerful classification engine.  */
 67
 68
 69struct rsvp_head {
 70	u32			tmap[256/32];
 71	u32			hgenerator;
 72	u8			tgenerator;
 73	struct rsvp_session	*ht[256];
 74};
 75
 76struct rsvp_session {
 77	struct rsvp_session	*next;
 78	__be32			dst[RSVP_DST_LEN];
 79	struct tc_rsvp_gpi 	dpi;
 80	u8			protocol;
 81	u8			tunnelid;
 82	/* 16 (src,sport) hash slots, and one wildcard source slot */
 83	struct rsvp_filter	*ht[16 + 1];
 84};
 85
 86
 87struct rsvp_filter {
 88	struct rsvp_filter	*next;
 89	__be32			src[RSVP_DST_LEN];
 90	struct tc_rsvp_gpi	spi;
 91	u8			tunnelhdr;
 92
 93	struct tcf_result	res;
 94	struct tcf_exts		exts;
 95
 96	u32			handle;
 97	struct rsvp_session	*sess;
 98};
 99
100static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
101{
102	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
103
104	h ^= h>>16;
105	h ^= h>>8;
106	return (h ^ protocol ^ tunnelid) & 0xFF;
107}
108
109static inline unsigned int hash_src(__be32 *src)
110{
111	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
112
113	h ^= h>>16;
114	h ^= h>>8;
115	h ^= h>>4;
116	return h & 0xF;
117}
118
119#define RSVP_APPLY_RESULT()				\
120{							\
121	int r = tcf_exts_exec(skb, &f->exts, res);	\
122	if (r < 0)					\
123		continue;				\
124	else if (r > 0)					\
125		return r;				\
126}
127
128static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
129			 struct tcf_result *res)
130{
131	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
132	struct rsvp_session *s;
133	struct rsvp_filter *f;
134	unsigned int h1, h2;
135	__be32 *dst, *src;
136	u8 protocol;
137	u8 tunnelid = 0;
138	u8 *xprt;
139#if RSVP_DST_LEN == 4
140	struct ipv6hdr *nhptr;
141
142	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
143		return -1;
144	nhptr = ipv6_hdr(skb);
145#else
146	struct iphdr *nhptr;
147
148	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
149		return -1;
150	nhptr = ip_hdr(skb);
151#endif
152
153restart:
154
155#if RSVP_DST_LEN == 4
156	src = &nhptr->saddr.s6_addr32[0];
157	dst = &nhptr->daddr.s6_addr32[0];
158	protocol = nhptr->nexthdr;
159	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
160#else
161	src = &nhptr->saddr;
162	dst = &nhptr->daddr;
163	protocol = nhptr->protocol;
164	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
165	if (ip_is_fragment(nhptr))
166		return -1;
167#endif
168
169	h1 = hash_dst(dst, protocol, tunnelid);
170	h2 = hash_src(src);
171
172	for (s = sht[h1]; s; s = s->next) {
173		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
174		    protocol == s->protocol &&
175		    !(s->dpi.mask &
176		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
177#if RSVP_DST_LEN == 4
178		    dst[0] == s->dst[0] &&
179		    dst[1] == s->dst[1] &&
180		    dst[2] == s->dst[2] &&
181#endif
182		    tunnelid == s->tunnelid) {
183
184			for (f = s->ht[h2]; f; f = f->next) {
185				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
186				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
187#if RSVP_DST_LEN == 4
188				    &&
189				    src[0] == f->src[0] &&
190				    src[1] == f->src[1] &&
191				    src[2] == f->src[2]
192#endif
193				    ) {
194					*res = f->res;
195					RSVP_APPLY_RESULT();
196
197matched:
198					if (f->tunnelhdr == 0)
199						return 0;
200
201					tunnelid = f->res.classid;
202					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
203					goto restart;
204				}
205			}
206
207			/* And wildcard bucket... */
208			for (f = s->ht[16]; f; f = f->next) {
209				*res = f->res;
210				RSVP_APPLY_RESULT();
211				goto matched;
212			}
213			return -1;
214		}
215	}
216	return -1;
217}
218
219static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
220{
221	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
222	struct rsvp_session *s;
223	struct rsvp_filter *f;
224	unsigned int h1 = handle & 0xFF;
225	unsigned int h2 = (handle >> 8) & 0xFF;
226
227	if (h2 > 16)
228		return 0;
229
230	for (s = sht[h1]; s; s = s->next) {
231		for (f = s->ht[h2]; f; f = f->next) {
232			if (f->handle == handle)
233				return (unsigned long)f;
234		}
235	}
236	return 0;
237}
238
239static void rsvp_put(struct tcf_proto *tp, unsigned long f)
240{
241}
242
243static int rsvp_init(struct tcf_proto *tp)
244{
245	struct rsvp_head *data;
246
247	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
248	if (data) {
249		tp->root = data;
250		return 0;
251	}
252	return -ENOBUFS;
253}
254
255static void
256rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
257{
258	tcf_unbind_filter(tp, &f->res);
259	tcf_exts_destroy(tp, &f->exts);
260	kfree(f);
261}
262
263static void rsvp_destroy(struct tcf_proto *tp)
264{
265	struct rsvp_head *data = xchg(&tp->root, NULL);
266	struct rsvp_session **sht;
267	int h1, h2;
268
269	if (data == NULL)
270		return;
271
272	sht = data->ht;
273
274	for (h1 = 0; h1 < 256; h1++) {
275		struct rsvp_session *s;
276
277		while ((s = sht[h1]) != NULL) {
278			sht[h1] = s->next;
279
280			for (h2 = 0; h2 <= 16; h2++) {
281				struct rsvp_filter *f;
282
283				while ((f = s->ht[h2]) != NULL) {
284					s->ht[h2] = f->next;
285					rsvp_delete_filter(tp, f);
286				}
287			}
288			kfree(s);
289		}
290	}
291	kfree(data);
292}
293
294static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
295{
296	struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
297	unsigned int h = f->handle;
298	struct rsvp_session **sp;
299	struct rsvp_session *s = f->sess;
300	int i;
301
302	for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
303		if (*fp == f) {
304			tcf_tree_lock(tp);
305			*fp = f->next;
306			tcf_tree_unlock(tp);
307			rsvp_delete_filter(tp, f);
308
309			/* Strip tree */
310
311			for (i = 0; i <= 16; i++)
312				if (s->ht[i])
313					return 0;
314
315			/* OK, session has no flows */
316			for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
317			     *sp; sp = &(*sp)->next) {
318				if (*sp == s) {
319					tcf_tree_lock(tp);
320					*sp = s->next;
321					tcf_tree_unlock(tp);
322
323					kfree(s);
324					return 0;
325				}
326			}
327
328			return 0;
329		}
330	}
331	return 0;
332}
333
334static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
335{
336	struct rsvp_head *data = tp->root;
337	int i = 0xFFFF;
338
339	while (i-- > 0) {
340		u32 h;
341
342		if ((data->hgenerator += 0x10000) == 0)
343			data->hgenerator = 0x10000;
344		h = data->hgenerator|salt;
345		if (rsvp_get(tp, h) == 0)
346			return h;
347	}
348	return 0;
349}
350
351static int tunnel_bts(struct rsvp_head *data)
352{
353	int n = data->tgenerator >> 5;
354	u32 b = 1 << (data->tgenerator & 0x1F);
355
356	if (data->tmap[n] & b)
357		return 0;
358	data->tmap[n] |= b;
359	return 1;
360}
361
362static void tunnel_recycle(struct rsvp_head *data)
363{
364	struct rsvp_session **sht = data->ht;
365	u32 tmap[256/32];
366	int h1, h2;
367
368	memset(tmap, 0, sizeof(tmap));
369
370	for (h1 = 0; h1 < 256; h1++) {
371		struct rsvp_session *s;
372		for (s = sht[h1]; s; s = s->next) {
373			for (h2 = 0; h2 <= 16; h2++) {
374				struct rsvp_filter *f;
375
376				for (f = s->ht[h2]; f; f = f->next) {
377					if (f->tunnelhdr == 0)
378						continue;
379					data->tgenerator = f->res.classid;
380					tunnel_bts(data);
381				}
382			}
383		}
384	}
385
386	memcpy(data->tmap, tmap, sizeof(tmap));
387}
388
389static u32 gen_tunnel(struct rsvp_head *data)
390{
391	int i, k;
392
393	for (k = 0; k < 2; k++) {
394		for (i = 255; i > 0; i--) {
395			if (++data->tgenerator == 0)
396				data->tgenerator = 1;
397			if (tunnel_bts(data))
398				return data->tgenerator;
399		}
400		tunnel_recycle(data);
401	}
402	return 0;
403}
404
405static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
406	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
407	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
408				    .len = RSVP_DST_LEN * sizeof(u32) },
409	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
410				    .len = RSVP_DST_LEN * sizeof(u32) },
411	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
412};
413
414static int rsvp_change(struct net *net, struct sk_buff *in_skb,
415		       struct tcf_proto *tp, unsigned long base,
416		       u32 handle,
417		       struct nlattr **tca,
418		       unsigned long *arg)
419{
420	struct rsvp_head *data = tp->root;
421	struct rsvp_filter *f, **fp;
422	struct rsvp_session *s, **sp;
423	struct tc_rsvp_pinfo *pinfo = NULL;
424	struct nlattr *opt = tca[TCA_OPTIONS];
425	struct nlattr *tb[TCA_RSVP_MAX + 1];
426	struct tcf_exts e;
427	unsigned int h1, h2;
428	__be32 *dst;
429	int err;
430
431	if (opt == NULL)
432		return handle ? -EINVAL : 0;
433
434	err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy);
435	if (err < 0)
436		return err;
437
438	tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
439	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
440	if (err < 0)
441		return err;
442
443	f = (struct rsvp_filter *)*arg;
444	if (f) {
445		/* Node exists: adjust only classid */
446
447		if (f->handle != handle && handle)
448			goto errout2;
449		if (tb[TCA_RSVP_CLASSID]) {
450			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
451			tcf_bind_filter(tp, &f->res, base);
452		}
453
454		tcf_exts_change(tp, &f->exts, &e);
455		return 0;
456	}
457
458	/* Now more serious part... */
459	err = -EINVAL;
460	if (handle)
461		goto errout2;
462	if (tb[TCA_RSVP_DST] == NULL)
463		goto errout2;
464
465	err = -ENOBUFS;
466	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
467	if (f == NULL)
468		goto errout2;
469
470	tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
471	h2 = 16;
472	if (tb[TCA_RSVP_SRC]) {
473		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
474		h2 = hash_src(f->src);
475	}
476	if (tb[TCA_RSVP_PINFO]) {
477		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
478		f->spi = pinfo->spi;
479		f->tunnelhdr = pinfo->tunnelhdr;
480	}
481	if (tb[TCA_RSVP_CLASSID])
482		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
483
484	dst = nla_data(tb[TCA_RSVP_DST]);
485	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
486
487	err = -ENOMEM;
488	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
489		goto errout;
490
491	if (f->tunnelhdr) {
492		err = -EINVAL;
493		if (f->res.classid > 255)
494			goto errout;
495
496		err = -ENOMEM;
497		if (f->res.classid == 0 &&
498		    (f->res.classid = gen_tunnel(data)) == 0)
499			goto errout;
500	}
501
502	for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
503		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
504		    pinfo && pinfo->protocol == s->protocol &&
505		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
506#if RSVP_DST_LEN == 4
507		    dst[0] == s->dst[0] &&
508		    dst[1] == s->dst[1] &&
509		    dst[2] == s->dst[2] &&
510#endif
511		    pinfo->tunnelid == s->tunnelid) {
512
513insert:
514			/* OK, we found appropriate session */
515
516			fp = &s->ht[h2];
517
518			f->sess = s;
519			if (f->tunnelhdr == 0)
520				tcf_bind_filter(tp, &f->res, base);
521
522			tcf_exts_change(tp, &f->exts, &e);
523
524			for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
525				if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
526					break;
527			f->next = *fp;
528			wmb();
529			*fp = f;
530
531			*arg = (unsigned long)f;
532			return 0;
533		}
534	}
535
536	/* No session found. Create new one. */
537
538	err = -ENOBUFS;
539	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
540	if (s == NULL)
541		goto errout;
542	memcpy(s->dst, dst, sizeof(s->dst));
543
544	if (pinfo) {
545		s->dpi = pinfo->dpi;
546		s->protocol = pinfo->protocol;
547		s->tunnelid = pinfo->tunnelid;
548	}
549	for (sp = &data->ht[h1]; *sp; sp = &(*sp)->next) {
550		if (((*sp)->dpi.mask&s->dpi.mask) != s->dpi.mask)
551			break;
552	}
553	s->next = *sp;
554	wmb();
555	*sp = s;
556
557	goto insert;
558
559errout:
560	kfree(f);
561errout2:
562	tcf_exts_destroy(tp, &e);
563	return err;
564}
565
566static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
567{
568	struct rsvp_head *head = tp->root;
569	unsigned int h, h1;
570
571	if (arg->stop)
572		return;
573
574	for (h = 0; h < 256; h++) {
575		struct rsvp_session *s;
576
577		for (s = head->ht[h]; s; s = s->next) {
578			for (h1 = 0; h1 <= 16; h1++) {
579				struct rsvp_filter *f;
580
581				for (f = s->ht[h1]; f; f = f->next) {
582					if (arg->count < arg->skip) {
583						arg->count++;
584						continue;
585					}
586					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
587						arg->stop = 1;
588						return;
589					}
590					arg->count++;
591				}
592			}
593		}
594	}
595}
596
597static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
598		     struct sk_buff *skb, struct tcmsg *t)
599{
600	struct rsvp_filter *f = (struct rsvp_filter *)fh;
601	struct rsvp_session *s;
602	unsigned char *b = skb_tail_pointer(skb);
603	struct nlattr *nest;
604	struct tc_rsvp_pinfo pinfo;
605
606	if (f == NULL)
607		return skb->len;
608	s = f->sess;
609
610	t->tcm_handle = f->handle;
611
612	nest = nla_nest_start(skb, TCA_OPTIONS);
613	if (nest == NULL)
614		goto nla_put_failure;
615
616	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
617		goto nla_put_failure;
618	pinfo.dpi = s->dpi;
619	pinfo.spi = f->spi;
620	pinfo.protocol = s->protocol;
621	pinfo.tunnelid = s->tunnelid;
622	pinfo.tunnelhdr = f->tunnelhdr;
623	pinfo.pad = 0;
624	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
625		goto nla_put_failure;
626	if (f->res.classid &&
627	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
628		goto nla_put_failure;
629	if (((f->handle >> 8) & 0xFF) != 16 &&
630	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
631		goto nla_put_failure;
632
633	if (tcf_exts_dump(skb, &f->exts) < 0)
634		goto nla_put_failure;
635
636	nla_nest_end(skb, nest);
637
638	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
639		goto nla_put_failure;
640	return skb->len;
641
642nla_put_failure:
643	nlmsg_trim(skb, b);
644	return -1;
645}
646
647static struct tcf_proto_ops RSVP_OPS __read_mostly = {
648	.kind		=	RSVP_ID,
649	.classify	=	rsvp_classify,
650	.init		=	rsvp_init,
651	.destroy	=	rsvp_destroy,
652	.get		=	rsvp_get,
653	.put		=	rsvp_put,
654	.change		=	rsvp_change,
655	.delete		=	rsvp_delete,
656	.walk		=	rsvp_walk,
657	.dump		=	rsvp_dump,
658	.owner		=	THIS_MODULE,
659};
660
661static int __init init_rsvp(void)
662{
663	return register_tcf_proto_ops(&RSVP_OPS);
664}
665
666static void __exit exit_rsvp(void)
667{
668	unregister_tcf_proto_ops(&RSVP_OPS);
669}
670
671module_init(init_rsvp)
672module_exit(exit_rsvp)