v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 */
  7
  8/*
  9   Compared to the general packet classification problem,
 10   RSVP needs only several relatively simple rules:
 11
 12   * (dst, protocol) are always specified,
 13     so that we are able to hash them.
 14   * src may be exact, or may be wildcard, so that
 15     we can keep a hash table plus one wildcard entry.
 16   * source port (or flow label) is important only if src is given.
 17
 18   IMPLEMENTATION.
 19
 20   We use a two level hash table: The top level is keyed by
 21   destination address and protocol ID, every bucket contains a list
 22   of "rsvp sessions", identified by destination address, protocol and
 23   DPI(="Destination Port ID"): triple (key, mask, offset).
 24
 25   Every bucket has a smaller hash table keyed by source address
 26   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
 27   Every bucket is again a list of "RSVP flows", selected by
 28   source address and SPI(="Source Port ID" here rather than
 29   "security parameter index"): triple (key, mask, offset).
 30
 31
 32   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
 33   and all fragmented packets go to the best-effort traffic class.
 34
 35
 36   NOTE 2. Two "port id"s seem to be redundant; rfc2207 requires
 37   only one "Generalized Port Identifier". So for classic
 38   ah, esp (and udp, tcp) both *pi should coincide or one of them
 39   should be a wildcard.
 40
 41   At first sight, this redundancy is just a waste of CPU
 42   resources. But DPI and SPI add the possibility to assign different
 43   priorities to GPIs. Look also at note 4 about tunnels below.
 44
 45
 46   NOTE 3. One complication is the case of tunneled packets.
 47   We implement it as follows: if the first lookup
 48   matches a special session with "tunnelhdr" value not zero,
 49   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
 50   In this case, we pull tunnelhdr bytes and restart lookup
 51   with tunnel ID added to the list of keys. Simple and stupid 8)8)
 52   It's enough for PIMREG and IPIP.
 53
 54
 55   NOTE 4. Two GPIs make it possible to parse even GRE packets.
 56   F.e. DPI can select ETH_P_IP (and necessary flags to make
 57   tunnelhdr correct) in GRE protocol field and SPI matches
 58   GRE key. Is it not nice? 8)8)
 59
 60
 61   Well, as a result, despite its simplicity, we get a pretty
 62   powerful classification engine.  */
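
Both the DPI and the SPI described above are instances of the generic port identifier triple (key, mask, offset) carried in struct tc_rsvp_gpi: the classifier loads one 32-bit word of the transport header at the given byte offset and compares it with key under mask, which is exactly the dpi/spi test in rsvp_classify() below. A minimal user-space sketch of that test (illustrative only, not part of the kernel file; the UDP destination-port example and the names in it are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* User-space mirror of the (key, mask, offset) triple used for DPI/SPI. */
struct gpi {
	uint32_t key;    /* expected value, network byte order */
	uint32_t mask;   /* which bits of the 32-bit word must match */
	int      offset; /* byte offset from the transport header */
};

/* Same predicate as the dpi/spi check in rsvp_classify(): the selected
 * word matches iff no masked bit differs from the key.  memcpy() stands
 * in for the kernel's direct *(u32 *)(xprt + offset) load.
 */
static bool gpi_match(const struct gpi *g, const uint8_t *xprt)
{
	uint32_t word;

	memcpy(&word, xprt + g->offset, sizeof(word));
	return (g->mask & (word ^ g->key)) == 0;
}

int main(void)
{
	/* Hypothetical example: match UDP destination port 5001.  The first
	 * 32-bit word of the UDP header is (sport << 16) | dport in network
	 * byte order, so mask off the source-port half and compare the rest.
	 */
	struct gpi udp_dport_5001 = {
		.key    = htonl(5001),
		.mask   = htonl(0x0000FFFF),
		.offset = 0,
	};
	/* Fake UDP header: sport 40000 (0x9c40), dport 5001 (0x1389). */
	uint8_t udp[8] = { 0x9c, 0x40, 0x13, 0x89, 0, 0, 0, 0 };

	return gpi_match(&udp_dport_5001, udp) ? 0 : 1;
}
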
 63
 64
 65struct rsvp_head {
 66	u32			tmap[256/32];
 67	u32			hgenerator;
 68	u8			tgenerator;
 69	struct rsvp_session __rcu *ht[256];
 70	struct rcu_head		rcu;
 71};
 72
 73struct rsvp_session {
 74	struct rsvp_session __rcu	*next;
 75	__be32				dst[RSVP_DST_LEN];
 76	struct tc_rsvp_gpi		dpi;
 77	u8				protocol;
 78	u8				tunnelid;
 79	/* 16 (src,sport) hash slots, and one wildcard source slot */
 80	struct rsvp_filter __rcu	*ht[16 + 1];
 81	struct rcu_head			rcu;
 82};
 83
 84
 85struct rsvp_filter {
 86	struct rsvp_filter __rcu	*next;
 87	__be32				src[RSVP_DST_LEN];
 88	struct tc_rsvp_gpi		spi;
 89	u8				tunnelhdr;
 90
 91	struct tcf_result		res;
 92	struct tcf_exts			exts;
 93
 94	u32				handle;
 95	struct rsvp_session		*sess;
 96	struct rcu_work			rwork;
 97};
 98
 99static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
100{
101	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
102
103	h ^= h>>16;
104	h ^= h>>8;
105	return (h ^ protocol ^ tunnelid) & 0xFF;
106}
107
108static inline unsigned int hash_src(__be32 *src)
109{
110	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
111
112	h ^= h>>16;
113	h ^= h>>8;
114	h ^= h>>4;
115	return h & 0xF;
116}
117
118#define RSVP_APPLY_RESULT()				\
119{							\
120	int r = tcf_exts_exec(skb, &f->exts, res);	\
121	if (r < 0)					\
122		continue;				\
123	else if (r > 0)					\
124		return r;				\
125}
126
127static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
128			 struct tcf_result *res)
129{
130	struct rsvp_head *head = rcu_dereference_bh(tp->root);
131	struct rsvp_session *s;
132	struct rsvp_filter *f;
133	unsigned int h1, h2;
134	__be32 *dst, *src;
135	u8 protocol;
136	u8 tunnelid = 0;
137	u8 *xprt;
138#if RSVP_DST_LEN == 4
139	struct ipv6hdr *nhptr;
140
141	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
142		return -1;
143	nhptr = ipv6_hdr(skb);
144#else
145	struct iphdr *nhptr;
146
147	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
148		return -1;
149	nhptr = ip_hdr(skb);
150#endif
151restart:
152
153#if RSVP_DST_LEN == 4
154	src = &nhptr->saddr.s6_addr32[0];
155	dst = &nhptr->daddr.s6_addr32[0];
156	protocol = nhptr->nexthdr;
157	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
158#else
159	src = &nhptr->saddr;
160	dst = &nhptr->daddr;
161	protocol = nhptr->protocol;
162	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
163	if (ip_is_fragment(nhptr))
164		return -1;
165#endif
166
167	h1 = hash_dst(dst, protocol, tunnelid);
168	h2 = hash_src(src);
169
170	for (s = rcu_dereference_bh(head->ht[h1]); s;
171	     s = rcu_dereference_bh(s->next)) {
172		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
173		    protocol == s->protocol &&
174		    !(s->dpi.mask &
175		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
176#if RSVP_DST_LEN == 4
177		    dst[0] == s->dst[0] &&
178		    dst[1] == s->dst[1] &&
179		    dst[2] == s->dst[2] &&
180#endif
181		    tunnelid == s->tunnelid) {
182
183			for (f = rcu_dereference_bh(s->ht[h2]); f;
184			     f = rcu_dereference_bh(f->next)) {
185				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
186				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
187#if RSVP_DST_LEN == 4
188				    &&
189				    src[0] == f->src[0] &&
190				    src[1] == f->src[1] &&
191				    src[2] == f->src[2]
192#endif
193				    ) {
194					*res = f->res;
195					RSVP_APPLY_RESULT();
196
197matched:
198					if (f->tunnelhdr == 0)
199						return 0;
200
201					tunnelid = f->res.classid;
202					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
203					goto restart;
204				}
205			}
206
207			/* And wildcard bucket... */
208			for (f = rcu_dereference_bh(s->ht[16]); f;
209			     f = rcu_dereference_bh(f->next)) {
210				*res = f->res;
211				RSVP_APPLY_RESULT();
212				goto matched;
213			}
214			return -1;
215		}
216	}
217	return -1;
218}
219
220static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
221{
222	struct rsvp_head *head = rtnl_dereference(tp->root);
223	struct rsvp_session *s;
224	struct rsvp_filter __rcu **ins;
225	struct rsvp_filter *pins;
226	unsigned int h1 = h & 0xFF;
227	unsigned int h2 = (h >> 8) & 0xFF;
228
229	for (s = rtnl_dereference(head->ht[h1]); s;
230	     s = rtnl_dereference(s->next)) {
231		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
232		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
233			if (pins->handle == h) {
234				RCU_INIT_POINTER(n->next, pins->next);
235				rcu_assign_pointer(*ins, n);
236				return;
237			}
238		}
239	}
240
 241	/* Something went wrong if we are trying to replace a non-existent
 242	 * node. Might as well halt instead of silently failing.
243	 */
244	BUG_ON(1);
245}
246
247static void *rsvp_get(struct tcf_proto *tp, u32 handle)
248{
249	struct rsvp_head *head = rtnl_dereference(tp->root);
250	struct rsvp_session *s;
251	struct rsvp_filter *f;
252	unsigned int h1 = handle & 0xFF;
253	unsigned int h2 = (handle >> 8) & 0xFF;
254
255	if (h2 > 16)
256		return NULL;
257
258	for (s = rtnl_dereference(head->ht[h1]); s;
259	     s = rtnl_dereference(s->next)) {
260		for (f = rtnl_dereference(s->ht[h2]); f;
261		     f = rtnl_dereference(f->next)) {
262			if (f->handle == handle)
263				return f;
264		}
265	}
266	return NULL;
267}
268
269static int rsvp_init(struct tcf_proto *tp)
270{
271	struct rsvp_head *data;
272
273	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
274	if (data) {
275		rcu_assign_pointer(tp->root, data);
276		return 0;
277	}
278	return -ENOBUFS;
279}
280
281static void __rsvp_delete_filter(struct rsvp_filter *f)
282{
283	tcf_exts_destroy(&f->exts);
284	tcf_exts_put_net(&f->exts);
285	kfree(f);
286}
287
288static void rsvp_delete_filter_work(struct work_struct *work)
289{
290	struct rsvp_filter *f = container_of(to_rcu_work(work),
291					     struct rsvp_filter,
292					     rwork);
293	rtnl_lock();
294	__rsvp_delete_filter(f);
295	rtnl_unlock();
296}
297
298static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
299{
300	tcf_unbind_filter(tp, &f->res);
301	/* all classifiers are required to call tcf_exts_destroy() after rcu
302	 * grace period, since converted-to-rcu actions are relying on that
303	 * in cleanup() callback
304	 */
305	if (tcf_exts_get_net(&f->exts))
306		tcf_queue_work(&f->rwork, rsvp_delete_filter_work);
307	else
308		__rsvp_delete_filter(f);
309}
310
311static void rsvp_destroy(struct tcf_proto *tp, bool rtnl_held,
312			 struct netlink_ext_ack *extack)
313{
314	struct rsvp_head *data = rtnl_dereference(tp->root);
315	int h1, h2;
316
317	if (data == NULL)
318		return;
319
320	for (h1 = 0; h1 < 256; h1++) {
321		struct rsvp_session *s;
322
323		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
324			RCU_INIT_POINTER(data->ht[h1], s->next);
325
326			for (h2 = 0; h2 <= 16; h2++) {
327				struct rsvp_filter *f;
328
329				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
330					rcu_assign_pointer(s->ht[h2], f->next);
331					rsvp_delete_filter(tp, f);
332				}
333			}
334			kfree_rcu(s, rcu);
335		}
336	}
337	kfree_rcu(data, rcu);
338}
339
340static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last,
341		       bool rtnl_held, struct netlink_ext_ack *extack)
342{
343	struct rsvp_head *head = rtnl_dereference(tp->root);
344	struct rsvp_filter *nfp, *f = arg;
345	struct rsvp_filter __rcu **fp;
346	unsigned int h = f->handle;
347	struct rsvp_session __rcu **sp;
348	struct rsvp_session *nsp, *s = f->sess;
349	int i, h1;
350
351	fp = &s->ht[(h >> 8) & 0xFF];
352	for (nfp = rtnl_dereference(*fp); nfp;
353	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
354		if (nfp == f) {
355			RCU_INIT_POINTER(*fp, f->next);
356			rsvp_delete_filter(tp, f);
357
358			/* Strip tree */
359
360			for (i = 0; i <= 16; i++)
361				if (s->ht[i])
362					goto out;
363
364			/* OK, session has no flows */
365			sp = &head->ht[h & 0xFF];
366			for (nsp = rtnl_dereference(*sp); nsp;
367			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
368				if (nsp == s) {
369					RCU_INIT_POINTER(*sp, s->next);
370					kfree_rcu(s, rcu);
371					goto out;
372				}
373			}
374
375			break;
376		}
377	}
378
379out:
380	*last = true;
381	for (h1 = 0; h1 < 256; h1++) {
382		if (rcu_access_pointer(head->ht[h1])) {
383			*last = false;
384			break;
385		}
386	}
387
388	return 0;
389}
390
391static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
392{
393	struct rsvp_head *data = rtnl_dereference(tp->root);
394	int i = 0xFFFF;
395
396	while (i-- > 0) {
397		u32 h;
398
399		if ((data->hgenerator += 0x10000) == 0)
400			data->hgenerator = 0x10000;
401		h = data->hgenerator|salt;
402		if (!rsvp_get(tp, h))
403			return h;
404	}
405	return 0;
406}
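
As rsvp_get() above and the gen_handle(tp, h1 | (h2 << 8)) call in rsvp_change() below show, the 32-bit filter handle is not opaque: bits 0-7 carry the destination hash bucket, bits 8-15 carry the source hash slot (16 meaning the wildcard-source bucket, which is why rsvp_dump() checks that value before emitting TCA_RSVP_SRC), and bits 16-31 carry the per-classifier generator counter. A small user-space sketch of that layout (illustrative only; the names are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct rsvp_handle_parts {
	unsigned int dst_bucket; /* bits 0-7:   index into rsvp_head.ht[256]   */
	unsigned int src_slot;   /* bits 8-15:  index into rsvp_session.ht[17],
	                          *             16 = wildcard source           */
	unsigned int generator;  /* bits 16-31: hgenerator counter             */
};

static struct rsvp_handle_parts rsvp_handle_decode(uint32_t handle)
{
	struct rsvp_handle_parts p = {
		.dst_bucket = handle & 0xFF,
		.src_slot   = (handle >> 8) & 0xFF,
		.generator  = handle >> 16,
	};
	return p;
}

int main(void)
{
	/* e.g. generator 1, wildcard source slot (16), dst bucket 0x2a */
	struct rsvp_handle_parts p = rsvp_handle_decode(0x0001102a);

	printf("bucket %u, slot %u, generator %#x\n",
	       p.dst_bucket, p.src_slot, p.generator);
	return 0;
}
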
407
408static int tunnel_bts(struct rsvp_head *data)
409{
410	int n = data->tgenerator >> 5;
411	u32 b = 1 << (data->tgenerator & 0x1F);
412
413	if (data->tmap[n] & b)
414		return 0;
415	data->tmap[n] |= b;
416	return 1;
417}
418
419static void tunnel_recycle(struct rsvp_head *data)
420{
421	struct rsvp_session __rcu **sht = data->ht;
422	u32 tmap[256/32];
423	int h1, h2;
424
425	memset(tmap, 0, sizeof(tmap));
426
427	for (h1 = 0; h1 < 256; h1++) {
428		struct rsvp_session *s;
429		for (s = rtnl_dereference(sht[h1]); s;
430		     s = rtnl_dereference(s->next)) {
431			for (h2 = 0; h2 <= 16; h2++) {
432				struct rsvp_filter *f;
433
434				for (f = rtnl_dereference(s->ht[h2]); f;
435				     f = rtnl_dereference(f->next)) {
436					if (f->tunnelhdr == 0)
437						continue;
438					data->tgenerator = f->res.classid;
439					tunnel_bts(data);
440				}
441			}
442		}
443	}
444
445	memcpy(data->tmap, tmap, sizeof(tmap));
446}
447
448static u32 gen_tunnel(struct rsvp_head *data)
449{
450	int i, k;
451
452	for (k = 0; k < 2; k++) {
453		for (i = 255; i > 0; i--) {
454			if (++data->tgenerator == 0)
455				data->tgenerator = 1;
456			if (tunnel_bts(data))
457				return data->tgenerator;
458		}
459		tunnel_recycle(data);
460	}
461	return 0;
462}
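
Tunnel IDs are small integers in the range 1..255, tracked one bit each in the tmap bitmap of rsvp_head: ID n lives in word n >> 5, bit n & 0x1F, and gen_tunnel() never hands out ID 0 because tgenerator wraps from 0 to 1. A tiny user-space sketch of the test-and-set step performed by tunnel_bts() (illustrative only; the names are hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* One bit per possible tunnel ID (1..255), as in rsvp_head.tmap. */
static uint32_t tmap[256 / 32];

/* Test-and-set the bit for one tunnel ID; returns true if it was free.
 * Same word/bit split as tunnel_bts() above.
 */
static bool tunnel_id_claim(uint8_t id)
{
	int n = id >> 5;                /* which 32-bit word   */
	uint32_t b = 1u << (id & 0x1F); /* which bit inside it */

	if (tmap[n] & b)
		return false;           /* already in use */
	tmap[n] |= b;
	return true;
}

int main(void)
{
	/* Claiming the same ID twice: the second attempt must fail. */
	return (tunnel_id_claim(7) && !tunnel_id_claim(7)) ? 0 : 1;
}
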
463
464static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
465	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
466	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
467				    .len = RSVP_DST_LEN * sizeof(u32) },
468	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
469				    .len = RSVP_DST_LEN * sizeof(u32) },
470	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
471};
472
473static int rsvp_change(struct net *net, struct sk_buff *in_skb,
474		       struct tcf_proto *tp, unsigned long base,
475		       u32 handle,
476		       struct nlattr **tca,
477		       void **arg, bool ovr, bool rtnl_held,
478		       struct netlink_ext_ack *extack)
479{
480	struct rsvp_head *data = rtnl_dereference(tp->root);
481	struct rsvp_filter *f, *nfp;
482	struct rsvp_filter __rcu **fp;
483	struct rsvp_session *nsp, *s;
484	struct rsvp_session __rcu **sp;
485	struct tc_rsvp_pinfo *pinfo = NULL;
486	struct nlattr *opt = tca[TCA_OPTIONS];
487	struct nlattr *tb[TCA_RSVP_MAX + 1];
488	struct tcf_exts e;
489	unsigned int h1, h2;
490	__be32 *dst;
491	int err;
492
493	if (opt == NULL)
494		return handle ? -EINVAL : 0;
495
496	err = nla_parse_nested_deprecated(tb, TCA_RSVP_MAX, opt, rsvp_policy,
497					  NULL);
498	if (err < 0)
499		return err;
500
501	err = tcf_exts_init(&e, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
502	if (err < 0)
503		return err;
504	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr, true,
505				extack);
506	if (err < 0)
507		goto errout2;
508
509	f = *arg;
510	if (f) {
511		/* Node exists: adjust only classid */
512		struct rsvp_filter *n;
513
514		if (f->handle != handle && handle)
515			goto errout2;
516
517		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
518		if (!n) {
519			err = -ENOMEM;
520			goto errout2;
521		}
522
523		err = tcf_exts_init(&n->exts, net, TCA_RSVP_ACT,
524				    TCA_RSVP_POLICE);
525		if (err < 0) {
526			kfree(n);
527			goto errout2;
528		}
529
530		if (tb[TCA_RSVP_CLASSID]) {
531			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
532			tcf_bind_filter(tp, &n->res, base);
533		}
534
535		tcf_exts_change(&n->exts, &e);
536		rsvp_replace(tp, n, handle);
537		return 0;
538	}
539
540	/* Now more serious part... */
541	err = -EINVAL;
542	if (handle)
543		goto errout2;
544	if (tb[TCA_RSVP_DST] == NULL)
545		goto errout2;
546
547	err = -ENOBUFS;
548	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
549	if (f == NULL)
550		goto errout2;
551
552	err = tcf_exts_init(&f->exts, net, TCA_RSVP_ACT, TCA_RSVP_POLICE);
553	if (err < 0)
554		goto errout;
555	h2 = 16;
556	if (tb[TCA_RSVP_SRC]) {
557		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
558		h2 = hash_src(f->src);
559	}
560	if (tb[TCA_RSVP_PINFO]) {
561		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
562		f->spi = pinfo->spi;
563		f->tunnelhdr = pinfo->tunnelhdr;
564	}
565	if (tb[TCA_RSVP_CLASSID])
566		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
567
568	dst = nla_data(tb[TCA_RSVP_DST]);
569	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
570
571	err = -ENOMEM;
572	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
573		goto errout;
574
575	if (f->tunnelhdr) {
576		err = -EINVAL;
577		if (f->res.classid > 255)
578			goto errout;
579
580		err = -ENOMEM;
581		if (f->res.classid == 0 &&
582		    (f->res.classid = gen_tunnel(data)) == 0)
583			goto errout;
584	}
585
586	for (sp = &data->ht[h1];
587	     (s = rtnl_dereference(*sp)) != NULL;
588	     sp = &s->next) {
589		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
590		    pinfo && pinfo->protocol == s->protocol &&
591		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
592#if RSVP_DST_LEN == 4
593		    dst[0] == s->dst[0] &&
594		    dst[1] == s->dst[1] &&
595		    dst[2] == s->dst[2] &&
596#endif
597		    pinfo->tunnelid == s->tunnelid) {
598
599insert:
600			/* OK, we found appropriate session */
601
602			fp = &s->ht[h2];
603
604			f->sess = s;
605			if (f->tunnelhdr == 0)
606				tcf_bind_filter(tp, &f->res, base);
607
608			tcf_exts_change(&f->exts, &e);
609
610			fp = &s->ht[h2];
611			for (nfp = rtnl_dereference(*fp); nfp;
612			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
613				__u32 mask = nfp->spi.mask & f->spi.mask;
614
615				if (mask != f->spi.mask)
616					break;
617			}
618			RCU_INIT_POINTER(f->next, nfp);
619			rcu_assign_pointer(*fp, f);
620
621			*arg = f;
622			return 0;
623		}
624	}
625
626	/* No session found. Create new one. */
627
628	err = -ENOBUFS;
629	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
630	if (s == NULL)
631		goto errout;
632	memcpy(s->dst, dst, sizeof(s->dst));
633
634	if (pinfo) {
635		s->dpi = pinfo->dpi;
636		s->protocol = pinfo->protocol;
637		s->tunnelid = pinfo->tunnelid;
638	}
639	sp = &data->ht[h1];
640	for (nsp = rtnl_dereference(*sp); nsp;
641	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
642		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
643			break;
644	}
645	RCU_INIT_POINTER(s->next, nsp);
646	rcu_assign_pointer(*sp, s);
647
648	goto insert;
649
650errout:
651	tcf_exts_destroy(&f->exts);
652	kfree(f);
653errout2:
654	tcf_exts_destroy(&e);
655	return err;
656}
657
658static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg,
659		      bool rtnl_held)
660{
661	struct rsvp_head *head = rtnl_dereference(tp->root);
662	unsigned int h, h1;
663
664	if (arg->stop)
665		return;
666
667	for (h = 0; h < 256; h++) {
668		struct rsvp_session *s;
669
670		for (s = rtnl_dereference(head->ht[h]); s;
671		     s = rtnl_dereference(s->next)) {
672			for (h1 = 0; h1 <= 16; h1++) {
673				struct rsvp_filter *f;
674
675				for (f = rtnl_dereference(s->ht[h1]); f;
676				     f = rtnl_dereference(f->next)) {
677					if (arg->count < arg->skip) {
678						arg->count++;
679						continue;
680					}
681					if (arg->fn(tp, f, arg) < 0) {
682						arg->stop = 1;
683						return;
684					}
685					arg->count++;
686				}
687			}
688		}
689	}
690}
691
692static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh,
693		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
694{
695	struct rsvp_filter *f = fh;
696	struct rsvp_session *s;
697	struct nlattr *nest;
698	struct tc_rsvp_pinfo pinfo;
699
700	if (f == NULL)
701		return skb->len;
702	s = f->sess;
703
704	t->tcm_handle = f->handle;
705
706	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
707	if (nest == NULL)
708		goto nla_put_failure;
709
710	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
711		goto nla_put_failure;
712	pinfo.dpi = s->dpi;
713	pinfo.spi = f->spi;
714	pinfo.protocol = s->protocol;
715	pinfo.tunnelid = s->tunnelid;
716	pinfo.tunnelhdr = f->tunnelhdr;
717	pinfo.pad = 0;
718	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
719		goto nla_put_failure;
720	if (f->res.classid &&
721	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
722		goto nla_put_failure;
723	if (((f->handle >> 8) & 0xFF) != 16 &&
724	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
725		goto nla_put_failure;
726
727	if (tcf_exts_dump(skb, &f->exts) < 0)
728		goto nla_put_failure;
729
730	nla_nest_end(skb, nest);
731
732	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
733		goto nla_put_failure;
734	return skb->len;
735
736nla_put_failure:
737	nla_nest_cancel(skb, nest);
738	return -1;
739}
740
741static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl)
742{
743	struct rsvp_filter *f = fh;
744
745	if (f && f->res.classid == classid)
746		f->res.class = cl;
747}
748
749static struct tcf_proto_ops RSVP_OPS __read_mostly = {
750	.kind		=	RSVP_ID,
751	.classify	=	rsvp_classify,
752	.init		=	rsvp_init,
753	.destroy	=	rsvp_destroy,
754	.get		=	rsvp_get,
755	.change		=	rsvp_change,
756	.delete		=	rsvp_delete,
757	.walk		=	rsvp_walk,
758	.dump		=	rsvp_dump,
759	.bind_class	=	rsvp_bind_class,
760	.owner		=	THIS_MODULE,
761};
762
763static int __init init_rsvp(void)
764{
765	return register_tcf_proto_ops(&RSVP_OPS);
766}
767
768static void __exit exit_rsvp(void)
769{
770	unregister_tcf_proto_ops(&RSVP_OPS);
771}
772
773module_init(init_rsvp)
774module_exit(exit_rsvp)
v4.6
 
  1/*
  2 * net/sched/cls_rsvp.h	Template file for RSVPv[46] classifiers.
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 */
 11
 12/*
 13   Compared to the general packet classification problem,
 14   RSVP needs only several relatively simple rules:
 15
 16   * (dst, protocol) are always specified,
 17     so that we are able to hash them.
 18   * src may be exact, or may be wildcard, so that
 19     we can keep a hash table plus one wildcard entry.
 20   * source port (or flow label) is important only if src is given.
 21
 22   IMPLEMENTATION.
 23
 24   We use a two level hash table: The top level is keyed by
 25   destination address and protocol ID, every bucket contains a list
 26   of "rsvp sessions", identified by destination address, protocol and
 27   DPI(="Destination Port ID"): triple (key, mask, offset).
 28
 29   Every bucket has a smaller hash table keyed by source address
 30   (cf. RSVP flowspec) and one wildcard entry for wildcard reservations.
 31   Every bucket is again a list of "RSVP flows", selected by
 32   source address and SPI(="Source Port ID" here rather than
 33   "security parameter index"): triple (key, mask, offset).
 34
 35
 36   NOTE 1. All packets with IPv6 extension headers (except AH and ESP)
 37   and all fragmented packets go to the best-effort traffic class.
 38
 39
 40   NOTE 2. Two "port id"s seem to be redundant; rfc2207 requires
 41   only one "Generalized Port Identifier". So for classic
 42   ah, esp (and udp, tcp) both *pi should coincide or one of them
 43   should be a wildcard.
 44
 45   At first sight, this redundancy is just a waste of CPU
 46   resources. But DPI and SPI add the possibility to assign different
 47   priorities to GPIs. Look also at note 4 about tunnels below.
 48
 49
 50   NOTE 3. One complication is the case of tunneled packets.
 51   We implement it as follows: if the first lookup
 52   matches a special session with "tunnelhdr" value not zero,
 53   flowid doesn't contain the true flow ID, but the tunnel ID (1...255).
 54   In this case, we pull tunnelhdr bytes and restart lookup
 55   with tunnel ID added to the list of keys. Simple and stupid 8)8)
 56   It's enough for PIMREG and IPIP.
 57
 58
 59   NOTE 4. Two GPIs make it possible to parse even GRE packets.
 60   F.e. DPI can select ETH_P_IP (and necessary flags to make
 61   tunnelhdr correct) in GRE protocol field and SPI matches
 62   GRE key. Is it not nice? 8)8)
 63
 64
 65   Well, as a result, despite its simplicity, we get a pretty
 66   powerful classification engine.  */
 67
 68
 69struct rsvp_head {
 70	u32			tmap[256/32];
 71	u32			hgenerator;
 72	u8			tgenerator;
 73	struct rsvp_session __rcu *ht[256];
 74	struct rcu_head		rcu;
 75};
 76
 77struct rsvp_session {
 78	struct rsvp_session __rcu	*next;
 79	__be32				dst[RSVP_DST_LEN];
 80	struct tc_rsvp_gpi		dpi;
 81	u8				protocol;
 82	u8				tunnelid;
 83	/* 16 (src,sport) hash slots, and one wildcard source slot */
 84	struct rsvp_filter __rcu	*ht[16 + 1];
 85	struct rcu_head			rcu;
 86};
 87
 88
 89struct rsvp_filter {
 90	struct rsvp_filter __rcu	*next;
 91	__be32				src[RSVP_DST_LEN];
 92	struct tc_rsvp_gpi		spi;
 93	u8				tunnelhdr;
 94
 95	struct tcf_result		res;
 96	struct tcf_exts			exts;
 97
 98	u32				handle;
 99	struct rsvp_session		*sess;
100	struct rcu_head			rcu;
101};
102
103static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
104{
105	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
106
107	h ^= h>>16;
108	h ^= h>>8;
109	return (h ^ protocol ^ tunnelid) & 0xFF;
110}
111
112static inline unsigned int hash_src(__be32 *src)
113{
114	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
115
116	h ^= h>>16;
117	h ^= h>>8;
118	h ^= h>>4;
119	return h & 0xF;
120}
121
122#define RSVP_APPLY_RESULT()				\
123{							\
124	int r = tcf_exts_exec(skb, &f->exts, res);	\
125	if (r < 0)					\
126		continue;				\
127	else if (r > 0)					\
128		return r;				\
129}
130
131static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
132			 struct tcf_result *res)
133{
134	struct rsvp_head *head = rcu_dereference_bh(tp->root);
135	struct rsvp_session *s;
136	struct rsvp_filter *f;
137	unsigned int h1, h2;
138	__be32 *dst, *src;
139	u8 protocol;
140	u8 tunnelid = 0;
141	u8 *xprt;
142#if RSVP_DST_LEN == 4
143	struct ipv6hdr *nhptr;
144
145	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
146		return -1;
147	nhptr = ipv6_hdr(skb);
148#else
149	struct iphdr *nhptr;
150
151	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
152		return -1;
153	nhptr = ip_hdr(skb);
154#endif
155
156restart:
157
158#if RSVP_DST_LEN == 4
159	src = &nhptr->saddr.s6_addr32[0];
160	dst = &nhptr->daddr.s6_addr32[0];
161	protocol = nhptr->nexthdr;
162	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
163#else
164	src = &nhptr->saddr;
165	dst = &nhptr->daddr;
166	protocol = nhptr->protocol;
167	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
168	if (ip_is_fragment(nhptr))
169		return -1;
170#endif
171
172	h1 = hash_dst(dst, protocol, tunnelid);
173	h2 = hash_src(src);
174
175	for (s = rcu_dereference_bh(head->ht[h1]); s;
176	     s = rcu_dereference_bh(s->next)) {
177		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
178		    protocol == s->protocol &&
179		    !(s->dpi.mask &
180		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
181#if RSVP_DST_LEN == 4
182		    dst[0] == s->dst[0] &&
183		    dst[1] == s->dst[1] &&
184		    dst[2] == s->dst[2] &&
185#endif
186		    tunnelid == s->tunnelid) {
187
188			for (f = rcu_dereference_bh(s->ht[h2]); f;
189			     f = rcu_dereference_bh(f->next)) {
190				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
191				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
192#if RSVP_DST_LEN == 4
193				    &&
194				    src[0] == f->src[0] &&
195				    src[1] == f->src[1] &&
196				    src[2] == f->src[2]
197#endif
198				    ) {
199					*res = f->res;
200					RSVP_APPLY_RESULT();
201
202matched:
203					if (f->tunnelhdr == 0)
204						return 0;
205
206					tunnelid = f->res.classid;
207					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
208					goto restart;
209				}
210			}
211
212			/* And wildcard bucket... */
213			for (f = rcu_dereference_bh(s->ht[16]); f;
214			     f = rcu_dereference_bh(f->next)) {
215				*res = f->res;
216				RSVP_APPLY_RESULT();
217				goto matched;
218			}
219			return -1;
220		}
221	}
222	return -1;
223}
224
225static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
226{
227	struct rsvp_head *head = rtnl_dereference(tp->root);
228	struct rsvp_session *s;
229	struct rsvp_filter __rcu **ins;
230	struct rsvp_filter *pins;
231	unsigned int h1 = h & 0xFF;
232	unsigned int h2 = (h >> 8) & 0xFF;
233
234	for (s = rtnl_dereference(head->ht[h1]); s;
235	     s = rtnl_dereference(s->next)) {
236		for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
237		     ins = &pins->next, pins = rtnl_dereference(*ins)) {
238			if (pins->handle == h) {
239				RCU_INIT_POINTER(n->next, pins->next);
240				rcu_assign_pointer(*ins, n);
241				return;
242			}
243		}
244	}
245
 246	/* Something went wrong if we are trying to replace a non-existent
 247	 * node. Might as well halt instead of silently failing.
248	 */
249	BUG_ON(1);
250}
251
252static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
253{
254	struct rsvp_head *head = rtnl_dereference(tp->root);
255	struct rsvp_session *s;
256	struct rsvp_filter *f;
257	unsigned int h1 = handle & 0xFF;
258	unsigned int h2 = (handle >> 8) & 0xFF;
259
260	if (h2 > 16)
261		return 0;
262
263	for (s = rtnl_dereference(head->ht[h1]); s;
264	     s = rtnl_dereference(s->next)) {
265		for (f = rtnl_dereference(s->ht[h2]); f;
266		     f = rtnl_dereference(f->next)) {
267			if (f->handle == handle)
268				return (unsigned long)f;
269		}
270	}
271	return 0;
272}
273
274static int rsvp_init(struct tcf_proto *tp)
275{
276	struct rsvp_head *data;
277
278	data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
279	if (data) {
280		rcu_assign_pointer(tp->root, data);
281		return 0;
282	}
283	return -ENOBUFS;
284}
285
286static void rsvp_delete_filter_rcu(struct rcu_head *head)
287{
288	struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
289
290	tcf_exts_destroy(&f->exts);
291	kfree(f);
292}
293
294static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
295{
296	tcf_unbind_filter(tp, &f->res);
297	/* all classifiers are required to call tcf_exts_destroy() after rcu
298	 * grace period, since converted-to-rcu actions are relying on that
299	 * in cleanup() callback
300	 */
301	call_rcu(&f->rcu, rsvp_delete_filter_rcu);
302}
303
304static bool rsvp_destroy(struct tcf_proto *tp, bool force)
305{
306	struct rsvp_head *data = rtnl_dereference(tp->root);
307	int h1, h2;
308
309	if (data == NULL)
310		return true;
311
312	if (!force) {
313		for (h1 = 0; h1 < 256; h1++) {
314			if (rcu_access_pointer(data->ht[h1]))
315				return false;
316		}
317	}
318
319	RCU_INIT_POINTER(tp->root, NULL);
320
321	for (h1 = 0; h1 < 256; h1++) {
322		struct rsvp_session *s;
323
324		while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
325			RCU_INIT_POINTER(data->ht[h1], s->next);
326
327			for (h2 = 0; h2 <= 16; h2++) {
328				struct rsvp_filter *f;
329
330				while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
331					rcu_assign_pointer(s->ht[h2], f->next);
332					rsvp_delete_filter(tp, f);
333				}
334			}
335			kfree_rcu(s, rcu);
336		}
337	}
338	kfree_rcu(data, rcu);
339	return true;
340}
341
342static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
343{
344	struct rsvp_head *head = rtnl_dereference(tp->root);
345	struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg;
346	struct rsvp_filter __rcu **fp;
347	unsigned int h = f->handle;
348	struct rsvp_session __rcu **sp;
349	struct rsvp_session *nsp, *s = f->sess;
350	int i;
351
352	fp = &s->ht[(h >> 8) & 0xFF];
353	for (nfp = rtnl_dereference(*fp); nfp;
354	     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
355		if (nfp == f) {
356			RCU_INIT_POINTER(*fp, f->next);
357			rsvp_delete_filter(tp, f);
358
359			/* Strip tree */
360
361			for (i = 0; i <= 16; i++)
362				if (s->ht[i])
363					return 0;
364
365			/* OK, session has no flows */
366			sp = &head->ht[h & 0xFF];
367			for (nsp = rtnl_dereference(*sp); nsp;
368			     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
369				if (nsp == s) {
370					RCU_INIT_POINTER(*sp, s->next);
371					kfree_rcu(s, rcu);
372					return 0;
373				}
374			}
375
376			return 0;
377		}
378	}
379	return 0;
380}
381
382static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
383{
384	struct rsvp_head *data = rtnl_dereference(tp->root);
385	int i = 0xFFFF;
386
387	while (i-- > 0) {
388		u32 h;
389
390		if ((data->hgenerator += 0x10000) == 0)
391			data->hgenerator = 0x10000;
392		h = data->hgenerator|salt;
393		if (rsvp_get(tp, h) == 0)
394			return h;
395	}
396	return 0;
397}
398
399static int tunnel_bts(struct rsvp_head *data)
400{
401	int n = data->tgenerator >> 5;
402	u32 b = 1 << (data->tgenerator & 0x1F);
403
404	if (data->tmap[n] & b)
405		return 0;
406	data->tmap[n] |= b;
407	return 1;
408}
409
410static void tunnel_recycle(struct rsvp_head *data)
411{
412	struct rsvp_session __rcu **sht = data->ht;
413	u32 tmap[256/32];
414	int h1, h2;
415
416	memset(tmap, 0, sizeof(tmap));
417
418	for (h1 = 0; h1 < 256; h1++) {
419		struct rsvp_session *s;
420		for (s = rtnl_dereference(sht[h1]); s;
421		     s = rtnl_dereference(s->next)) {
422			for (h2 = 0; h2 <= 16; h2++) {
423				struct rsvp_filter *f;
424
425				for (f = rtnl_dereference(s->ht[h2]); f;
426				     f = rtnl_dereference(f->next)) {
427					if (f->tunnelhdr == 0)
428						continue;
429					data->tgenerator = f->res.classid;
430					tunnel_bts(data);
431				}
432			}
433		}
434	}
435
436	memcpy(data->tmap, tmap, sizeof(tmap));
437}
438
439static u32 gen_tunnel(struct rsvp_head *data)
440{
441	int i, k;
442
443	for (k = 0; k < 2; k++) {
444		for (i = 255; i > 0; i--) {
445			if (++data->tgenerator == 0)
446				data->tgenerator = 1;
447			if (tunnel_bts(data))
448				return data->tgenerator;
449		}
450		tunnel_recycle(data);
451	}
452	return 0;
453}
454
455static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
456	[TCA_RSVP_CLASSID]	= { .type = NLA_U32 },
457	[TCA_RSVP_DST]		= { .type = NLA_BINARY,
458				    .len = RSVP_DST_LEN * sizeof(u32) },
459	[TCA_RSVP_SRC]		= { .type = NLA_BINARY,
460				    .len = RSVP_DST_LEN * sizeof(u32) },
461	[TCA_RSVP_PINFO]	= { .len = sizeof(struct tc_rsvp_pinfo) },
462};
463
464static int rsvp_change(struct net *net, struct sk_buff *in_skb,
465		       struct tcf_proto *tp, unsigned long base,
466		       u32 handle,
467		       struct nlattr **tca,
468		       unsigned long *arg, bool ovr)
469{
470	struct rsvp_head *data = rtnl_dereference(tp->root);
471	struct rsvp_filter *f, *nfp;
472	struct rsvp_filter __rcu **fp;
473	struct rsvp_session *nsp, *s;
474	struct rsvp_session __rcu **sp;
475	struct tc_rsvp_pinfo *pinfo = NULL;
476	struct nlattr *opt = tca[TCA_OPTIONS];
477	struct nlattr *tb[TCA_RSVP_MAX + 1];
478	struct tcf_exts e;
479	unsigned int h1, h2;
480	__be32 *dst;
481	int err;
482
483	if (opt == NULL)
484		return handle ? -EINVAL : 0;
485
486	err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy);
487	if (err < 0)
488		return err;
489
490	tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
491	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
492	if (err < 0)
493		return err;
494
495	f = (struct rsvp_filter *)*arg;
496	if (f) {
497		/* Node exists: adjust only classid */
498		struct rsvp_filter *n;
499
500		if (f->handle != handle && handle)
501			goto errout2;
502
503		n = kmemdup(f, sizeof(*f), GFP_KERNEL);
504		if (!n) {
505			err = -ENOMEM;
506			goto errout2;
507		}
508
509		tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
510
511		if (tb[TCA_RSVP_CLASSID]) {
512			n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
513			tcf_bind_filter(tp, &n->res, base);
514		}
515
516		tcf_exts_change(tp, &n->exts, &e);
517		rsvp_replace(tp, n, handle);
518		return 0;
519	}
520
521	/* Now more serious part... */
522	err = -EINVAL;
523	if (handle)
524		goto errout2;
525	if (tb[TCA_RSVP_DST] == NULL)
526		goto errout2;
527
528	err = -ENOBUFS;
529	f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL);
530	if (f == NULL)
531		goto errout2;
532
533	tcf_exts_init(&f->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
534	h2 = 16;
535	if (tb[TCA_RSVP_SRC]) {
536		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
537		h2 = hash_src(f->src);
538	}
539	if (tb[TCA_RSVP_PINFO]) {
540		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
541		f->spi = pinfo->spi;
542		f->tunnelhdr = pinfo->tunnelhdr;
543	}
544	if (tb[TCA_RSVP_CLASSID])
545		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
546
547	dst = nla_data(tb[TCA_RSVP_DST]);
548	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
549
550	err = -ENOMEM;
551	if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0)
552		goto errout;
553
554	if (f->tunnelhdr) {
555		err = -EINVAL;
556		if (f->res.classid > 255)
557			goto errout;
558
559		err = -ENOMEM;
560		if (f->res.classid == 0 &&
561		    (f->res.classid = gen_tunnel(data)) == 0)
562			goto errout;
563	}
564
565	for (sp = &data->ht[h1];
566	     (s = rtnl_dereference(*sp)) != NULL;
567	     sp = &s->next) {
568		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
569		    pinfo && pinfo->protocol == s->protocol &&
570		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
571#if RSVP_DST_LEN == 4
572		    dst[0] == s->dst[0] &&
573		    dst[1] == s->dst[1] &&
574		    dst[2] == s->dst[2] &&
575#endif
576		    pinfo->tunnelid == s->tunnelid) {
577
578insert:
579			/* OK, we found appropriate session */
580
581			fp = &s->ht[h2];
582
583			f->sess = s;
584			if (f->tunnelhdr == 0)
585				tcf_bind_filter(tp, &f->res, base);
586
587			tcf_exts_change(tp, &f->exts, &e);
588
589			fp = &s->ht[h2];
590			for (nfp = rtnl_dereference(*fp); nfp;
591			     fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
592				__u32 mask = nfp->spi.mask & f->spi.mask;
593
594				if (mask != f->spi.mask)
595					break;
596			}
597			RCU_INIT_POINTER(f->next, nfp);
598			rcu_assign_pointer(*fp, f);
599
600			*arg = (unsigned long)f;
601			return 0;
602		}
603	}
604
605	/* No session found. Create new one. */
606
607	err = -ENOBUFS;
608	s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL);
609	if (s == NULL)
610		goto errout;
611	memcpy(s->dst, dst, sizeof(s->dst));
612
613	if (pinfo) {
614		s->dpi = pinfo->dpi;
615		s->protocol = pinfo->protocol;
616		s->tunnelid = pinfo->tunnelid;
617	}
618	sp = &data->ht[h1];
619	for (nsp = rtnl_dereference(*sp); nsp;
620	     sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
621		if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
622			break;
623	}
624	RCU_INIT_POINTER(s->next, nsp);
625	rcu_assign_pointer(*sp, s);
626
627	goto insert;
628
629errout:
630	kfree(f);
631errout2:
632	tcf_exts_destroy(&e);
633	return err;
634}
635
636static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
637{
638	struct rsvp_head *head = rtnl_dereference(tp->root);
639	unsigned int h, h1;
640
641	if (arg->stop)
642		return;
643
644	for (h = 0; h < 256; h++) {
645		struct rsvp_session *s;
646
647		for (s = rtnl_dereference(head->ht[h]); s;
648		     s = rtnl_dereference(s->next)) {
649			for (h1 = 0; h1 <= 16; h1++) {
650				struct rsvp_filter *f;
651
652				for (f = rtnl_dereference(s->ht[h1]); f;
653				     f = rtnl_dereference(f->next)) {
654					if (arg->count < arg->skip) {
655						arg->count++;
656						continue;
657					}
658					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
659						arg->stop = 1;
660						return;
661					}
662					arg->count++;
663				}
664			}
665		}
666	}
667}
668
669static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
670		     struct sk_buff *skb, struct tcmsg *t)
671{
672	struct rsvp_filter *f = (struct rsvp_filter *)fh;
673	struct rsvp_session *s;
674	struct nlattr *nest;
675	struct tc_rsvp_pinfo pinfo;
676
677	if (f == NULL)
678		return skb->len;
679	s = f->sess;
680
681	t->tcm_handle = f->handle;
682
683	nest = nla_nest_start(skb, TCA_OPTIONS);
684	if (nest == NULL)
685		goto nla_put_failure;
686
687	if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
688		goto nla_put_failure;
689	pinfo.dpi = s->dpi;
690	pinfo.spi = f->spi;
691	pinfo.protocol = s->protocol;
692	pinfo.tunnelid = s->tunnelid;
693	pinfo.tunnelhdr = f->tunnelhdr;
694	pinfo.pad = 0;
695	if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
696		goto nla_put_failure;
697	if (f->res.classid &&
698	    nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
699		goto nla_put_failure;
700	if (((f->handle >> 8) & 0xFF) != 16 &&
701	    nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
702		goto nla_put_failure;
703
704	if (tcf_exts_dump(skb, &f->exts) < 0)
705		goto nla_put_failure;
706
707	nla_nest_end(skb, nest);
708
709	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
710		goto nla_put_failure;
711	return skb->len;
712
713nla_put_failure:
714	nla_nest_cancel(skb, nest);
715	return -1;
716}
717
718static struct tcf_proto_ops RSVP_OPS __read_mostly = {
719	.kind		=	RSVP_ID,
720	.classify	=	rsvp_classify,
721	.init		=	rsvp_init,
722	.destroy	=	rsvp_destroy,
723	.get		=	rsvp_get,
724	.change		=	rsvp_change,
725	.delete		=	rsvp_delete,
726	.walk		=	rsvp_walk,
727	.dump		=	rsvp_dump,
728	.owner		=	THIS_MODULE,
729};
730
731static int __init init_rsvp(void)
732{
733	return register_tcf_proto_ops(&RSVP_OPS);
734}
735
736static void __exit exit_rsvp(void)
737{
738	unregister_tcf_proto_ops(&RSVP_OPS);
739}
740
741module_init(init_rsvp)
742module_exit(exit_rsvp)