v3.1
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

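/*
 * A flow filter computes one or more 32-bit keys from each packet and
 * either hashes them together (FLOW_MODE_HASH) or maps a single key
 * through mask/xor/rshift/addend arithmetic (FLOW_MODE_MAP) to select
 * a class id.
 */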
struct flow_head {
	struct list_head	filters;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
	.action	= TCA_FLOW_ACT,
	.police	= TCA_FLOW_POLICE,
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

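/*
 * Per-key extraction helpers.  Each returns one FLOW_KEY_* value as a
 * u32; when the relevant header cannot be read, they fall back to
 * folding a kernel pointer so packets still spread across classes.
 */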
static u32 flow_get_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
			return ntohl(ip_hdr(skb)->saddr);
		break;
	case htons(ETH_P_IPV6):
		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
			return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
		break;
	}

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
			return ntohl(ip_hdr(skb)->daddr);
		break;
	case htons(ETH_P_IPV6):
		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
			return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
		break;
	}

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
		       ip_hdr(skb)->protocol : 0;
	case htons(ETH_P_IPV6):
		return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
		       ipv6_hdr(skb)->nexthdr : 0;
	default:
		return 0;
	}
}

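/*
 * Port extraction: the packet must be linearized far enough to read
 * the 16-bit port, IPv4 fragments are skipped (only the first fragment
 * carries ports), and proto_ports_offset() locates the port field for
 * the transport protocol.  ip_hdr()/ipv6_hdr() are reloaded after each
 * pull because pskb_network_may_pull() may move the header.
 */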
static u32 flow_get_proto_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
			iph = ip_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
						 poff));
		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
			iph = ipv6_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
						 poff));
		}
		break;
	}
	}

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
						 2 + poff));
		}
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ipv6_hdr(skb);
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
			iph = ipv6_hdr(skb);
			return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
						 poff + 2));
		}
		break;
	}
	}

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

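/*
 * CTTUPLE() reads one member of the conntrack tuple for the packet's
 * direction.  Note the non-local control flow: if the skb carries no
 * conntrack entry (or conntrack is compiled out), the macro jumps to a
 * "fallback" label that every caller must provide.
 */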
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);			\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb);
}

static u32 flow_get_nfct_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb);
}

static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb);
}

static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_rxhash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb);
	case FLOW_KEY_DST:
		return flow_get_dst(skb);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
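
/*
 * Classify a packet: for every filter whose ematches accept the skb,
 * collect the configured keys (lowest key bit first) and derive the
 * class id, either by hashing all keys or, in map mode, by computing
 * (((key & mask) ^ xor) >> rshift) + addend, optionally modulo the
 * divisor.
 */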
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[f->nkeys];

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

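/*
 * Create or update a filter from a netlink request.  All attributes
 * are validated before any existing state is touched; the filter list
 * and live filter fields are only modified under tcf_tree_lock().
 */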
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}

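/*
 * Dump a filter's configuration to userspace; attributes still at
 * their default values are omitted.
 */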
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	if (f->perturb_period)
		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");
v3.5.6
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_keys.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
	.action	= TCA_FLOW_ACT,
	.police	= TCA_FLOW_POLICE,
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

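/*
 * This version replaces the open-coded IPv4/IPv6 header parsing of
 * v3.1 with the shared flow dissector: flow_classify() runs
 * skb_flow_dissect() once per packet and the address/port helpers
 * below read from the resulting struct flow_keys.
 */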
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->src)
		return ntohl(flow->src);
	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->dst)
		return ntohl(flow->dst);
	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports)
		return ntohs(flow->port16[0]);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports)
		return ntohs(flow->port16[1]);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_rxhash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

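/*
 * The keys derived from the dissected flow; skb_flow_dissect() is only
 * run when at least one of them is set in the filter's keymask.
 */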
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |			\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

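/*
 * Unlike the v3.1 version above, the key buffer is now a fixed-size
 * array of FLOW_KEY_MAX + 1 entries instead of a variable-length
 * array sized by f->nkeys.
 */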
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect(skb, &flow_keys);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}

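/*
 * The dump path now uses the error-checked nla_put_u32() calls in
 * place of the NLA_PUT_U32() macros used in v3.1.
 */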
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");