/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

struct fl_flow_key {
	int	indev_ifindex;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	struct rcu_head	rcu;
};

struct cls_fl_head {
	struct rhashtable ht;
	struct fl_flow_mask mask;
	struct flow_dissector dissector;
	u32 hgen;
	bool mask_assigned;
	struct list_head filters;
	struct rhashtable_params ht_params;
	struct rcu_head rcu;
};

struct cls_fl_filter {
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	u32 handle;
	struct rcu_head	rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

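/* Find the first and last non-zero bytes of the mask and round the range
 * out to long boundaries, so that masking, hashing and comparisons only
 * touch the used part of the key and can be done a long at a time.
 */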
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < sizeof(mask->key); i++) {
		if (bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

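/* AND the key with the mask, long by long, over the used range only. The
 * result is what gets hashed and compared in the rhashtable.
 */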
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

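/* Fast path: dissect the skb into a flow key, mask it with the single mask
 * shared by all filters of this classifier instance, and look the masked
 * key up in the hash table. On a hit, execute the filter's actions.
 */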
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case of an unknown
	 * protocol, so do it here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->filters);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

	tcf_exts_destroy(&f->exts);
	kfree(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, 0))
		return;

	offload.command = TC_CLSFLOWER_DESTROY;
	offload.cookie = cookie;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

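/* Ask the underlying device to install or update the filter in hardware via
 * ndo_setup_tc(). The cookie (the filter pointer in the callers) identifies
 * the hardware entry on a later destroy.
 */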
static void fl_hw_replace_filter(struct tcf_proto *tp,
				 struct flow_dissector *dissector,
				 struct fl_flow_key *mask,
				 struct fl_flow_key *key,
				 struct tcf_exts *actions,
				 unsigned long cookie, u32 flags)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_flower_offload offload = {0};
	struct tc_to_netdev tc;

	if (!tc_should_offload(dev, flags))
		return;

	offload.command = TC_CLSFLOWER_REPLACE;
	offload.cookie = cookie;
	offload.dissector = dissector;
	offload.mask = mask;
	offload.key = key;
	offload.exts = actions;

	tc.type = TC_SETUP_CLSFLOWER;
	tc.cls_flower = &offload;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		fl_hw_destroy_filter(tp, (unsigned long)f);
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long) f;
	return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
		       struct fl_flow_mask *mask2)
{
	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

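/* The hash key is only the used part of the masked key: key_len and
 * key_offset are adjusted to the mask's byte range before the table is
 * initialized.
 */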
static int fl_init_hashtable(struct cls_fl_head *head,
			     struct fl_flow_mask *mask)
{
	head->ht_params = fl_ht_params;
	head->ht_params.key_len = fl_mask_range(mask);
	head->ht_params.key_offset += mask->range.start;

	return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)					\
	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)						\
	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&			\
	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IN_RANGE(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while (0)

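/* Build the flow dissector key map: the control and basic keys are always
 * present, the remaining keys are included only if the mask actually covers
 * their bytes.
 */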
static void fl_init_dissector(struct cls_fl_head *head,
			      struct fl_flow_mask *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
			       FLOW_DISSECTOR_KEY_PORTS, tp);

	skb_flow_dissector_init(&head->dissector, keys, cnt);
}

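/* All filters attached to one classifier instance share a single mask. The
 * first filter installs it (and initializes the hash table and dissector);
 * later filters must use an identical mask or are rejected with -EINVAL.
 */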
static int fl_check_assign_mask(struct cls_fl_head *head,
				struct fl_flow_mask *mask)
{
	int err;

	if (head->mask_assigned) {
		if (!fl_mask_eq(&head->mask, mask))
			return -EINVAL;
		else
			return 0;
	}

	/* Mask is not assigned yet. So assign it and init the hashtable
	 * accordingly.
	 */
	err = fl_init_hashtable(head, mask);
	if (err)
		return err;
	memcpy(&head->mask, mask, sizeof(head->mask));
	head->mask_assigned = true;

	fl_init_dissector(head, mask);

	return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

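/* Pick an unused handle by bumping a per-head generation counter, wrapping
 * within 1..0x7FFFFFFE and giving up (returning 0) after 2^31 attempts.
 */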
static u32 fl_grab_new_handle(struct tcf_proto *tp,
			      struct cls_fl_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && fl_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

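/* Create or replace a filter: parse the netlink attributes into key and
 * mask, make sure the mask matches the one already in use, insert the
 * masked key into the hash table, offload to hardware if possible, and
 * finally link the filter into the list (replacing the old entry under RCU
 * when updating).
 */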
static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     unsigned long *arg, bool ovr)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
	struct cls_fl_filter *fnew;
	struct nlattr *tb[TCA_FLOWER_MAX + 1];
	struct fl_flow_mask mask = {};
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
	if (err < 0)
		return err;

	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

	if (!handle) {
		handle = fl_grab_new_handle(tp, head);
		if (!handle) {
			err = -EINVAL;
			goto errout;
		}
	}
	fnew->handle = handle;

	if (tb[TCA_FLOWER_FLAGS])
		flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, &mask);
	if (err)
		goto errout;

	err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
				     head->ht_params);
	if (err)
		goto errout;

	fl_hw_replace_filter(tp,
			     &head->dissector,
			     &mask.key,
			     &fnew->key,
			     &fnew->exts,
			     (unsigned long)fnew,
			     flags);

	if (fold) {
		rhashtable_remove_fast(&head->ht, &fold->ht_node,
				       head->ht_params);
		fl_hw_destroy_filter(tp, (unsigned long)fold);
	}

	*arg = (unsigned long) fnew;

	if (fold) {
		list_replace_rcu(&fold->list, &fnew->list);
		tcf_unbind_filter(tp, &fold->res);
		call_rcu(&fold->rcu, fl_destroy_filter);
	} else {
		list_add_tail_rcu(&fnew->list, &head->filters);
	}

	return 0;

errout:
	kfree(fnew);
	return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	fl_hw_destroy_filter(tp, (unsigned long)f);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

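/* Dump one filter back to userspace: emit the classid, indev and every
 * key/mask pair whose mask is non-zero, followed by the actions and their
 * statistics.
 */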
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");