v4.10.11: the IFE (Inter-FE) action
 
  1/*
  2 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
  3 *
  4 *		Refer to:
  5 *		draft-ietf-forces-interfelfb-03
  6 *		and
  7 *		netdev01 paper:
  8 *		"Distributing Linux Traffic Control Classifier-Action
  9 *		Subsystem"
 10 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 11 *
 12 *		This program is free software; you can redistribute it and/or
 13 *		modify it under the terms of the GNU General Public License
 14 *		as published by the Free Software Foundation; either version
 15 *		2 of the License, or (at your option) any later version.
 16 *
 17 * copyright Jamal Hadi Salim (2015)
 18 *
 19*/
 20
 21#include <linux/types.h>
 22#include <linux/kernel.h>
 23#include <linux/string.h>
 24#include <linux/errno.h>
 25#include <linux/skbuff.h>
 26#include <linux/rtnetlink.h>
 27#include <linux/module.h>
 28#include <linux/init.h>
 29#include <net/net_namespace.h>
 30#include <net/netlink.h>
 31#include <net/pkt_sched.h>
 32#include <uapi/linux/tc_act/tc_ife.h>
 33#include <net/tc_act/tc_ife.h>
 34#include <linux/etherdevice.h>
 35
 36#define IFE_TAB_MASK 15
 37
 38static unsigned int ife_net_id;
 39static int max_metacnt = IFE_META_MAX + 1;
 40static struct tc_action_ops act_ife_ops;
 41
 42static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
 43	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
 44	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
 45	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
 46	[TCA_IFE_TYPE] = { .type = NLA_U16},
 47};
 48
 49/* Caller takes care of presenting data in network order
 50*/
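/* Example: a call with attrtype 0x0004, dlen 2 and data bytes {0xab, 0xcd}
 * emits attrtype << 16 | (dlen + NLA_HDRLEN) = 0x00040006 in network order,
 * then the data, then zero padding up to the 4-byte NLA alignment:
 *	00 04 00 06 ab cd 00 00		(return value: nla_total_size(2) == 8)
 */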
 51int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
 52{
 53	u32 *tlv = (u32 *)(skbdata);
 54	u16 totlen = nla_total_size(dlen);	/*alignment + hdr */
 55	char *dptr = (char *)tlv + NLA_HDRLEN;
 56	u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 57
 58	*tlv = htonl(htlv);
 59	memset(dptr, 0, totlen - NLA_HDRLEN);
 60	memcpy(dptr, dval, dlen);
 61
 62	return totlen;
 63}
 64EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
 65
 66int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
 67{
 68	u16 edata = 0;
 69
 70	if (mi->metaval)
 71		edata = *(u16 *)mi->metaval;
 72	else if (metaval)
 73		edata = metaval;
 74
 75	if (!edata) /* will not encode */
 76		return 0;
 77
 78	edata = htons(edata);
 79	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
 80}
 81EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
 82
 83int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
 84{
 85	if (mi->metaval)
 86		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
 87	else
 88		return nla_put(skb, mi->metaid, 0, NULL);
 89}
 90EXPORT_SYMBOL_GPL(ife_get_meta_u32);
 91
 92int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
 93{
 94	if (metaval || mi->metaval)
 95		return 8; /* T+L+V == 2+2+4 */
 96
 97	return 0;
 98}
 99EXPORT_SYMBOL_GPL(ife_check_meta_u32);
100
101int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
102{
103	if (metaval || mi->metaval)
104		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
105
106	return 0;
107}
108EXPORT_SYMBOL_GPL(ife_check_meta_u16);
109
110int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
111{
112	u32 edata = metaval;
113
114	if (mi->metaval)
115		edata = *(u32 *)mi->metaval;
116	else if (metaval)
117		edata = metaval;
118
119	if (!edata) /* will not encode */
120		return 0;
121
122	edata = htonl(edata);
123	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
124}
125EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
126
127int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
128{
129	if (mi->metaval)
130		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
131	else
132		return nla_put(skb, mi->metaid, 0, NULL);
133}
134EXPORT_SYMBOL_GPL(ife_get_meta_u16);
135
136int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
137{
138	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
139	if (!mi->metaval)
140		return -ENOMEM;
141
142	return 0;
143}
144EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
145
146int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
147{
148	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
149	if (!mi->metaval)
150		return -ENOMEM;
151
152	return 0;
153}
154EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
155
156void ife_release_meta_gen(struct tcf_meta_info *mi)
157{
158	kfree(mi->metaval);
159}
160EXPORT_SYMBOL_GPL(ife_release_meta_gen);
161
162int ife_validate_meta_u32(void *val, int len)
163{
164	if (len == sizeof(u32))
165		return 0;
166
167	return -EINVAL;
168}
169EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
170
171int ife_validate_meta_u16(void *val, int len)
172{
173	/* length will not include padding */
174	if (len == sizeof(u16))
175		return 0;
176
177	return -EINVAL;
178}
179EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
180
181static LIST_HEAD(ifeoplist);
182static DEFINE_RWLOCK(ife_mod_lock);
183
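/* Returns the matching ops with a reference held on the owning module;
 * callers must drop it with module_put() when done.
 */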
184static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
185{
186	struct tcf_meta_ops *o;
187
188	read_lock(&ife_mod_lock);
189	list_for_each_entry(o, &ifeoplist, list) {
190		if (o->metaid == metaid) {
191			if (!try_module_get(o->owner))
192				o = NULL;
193			read_unlock(&ife_mod_lock);
194			return o;
195		}
196	}
197	read_unlock(&ife_mod_lock);
198
199	return NULL;
200}
201
202int register_ife_op(struct tcf_meta_ops *mops)
203{
204	struct tcf_meta_ops *m;
205
206	if (!mops->metaid || !mops->metatype || !mops->name ||
207	    !mops->check_presence || !mops->encode || !mops->decode ||
208	    !mops->get || !mops->alloc)
209		return -EINVAL;
210
211	write_lock(&ife_mod_lock);
212
213	list_for_each_entry(m, &ifeoplist, list) {
214		if (m->metaid == mops->metaid ||
215		    (strcmp(mops->name, m->name) == 0)) {
216			write_unlock(&ife_mod_lock);
217			return -EEXIST;
218		}
219	}
220
221	if (!mops->release)
222		mops->release = ife_release_meta_gen;
223
224	list_add_tail(&mops->list, &ifeoplist);
225	write_unlock(&ife_mod_lock);
226	return 0;
227}
228EXPORT_SYMBOL_GPL(unregister_ife_op);
229
230int unregister_ife_op(struct tcf_meta_ops *mops)
231{
232	struct tcf_meta_ops *m;
233	int err = -ENOENT;
234
235	write_lock(&ife_mod_lock);
236	list_for_each_entry(m, &ifeoplist, list) {
237		if (m->metaid == mops->metaid) {
238			list_del(&mops->list);
239			err = 0;
240			break;
241		}
242	}
243	write_unlock(&ife_mod_lock);
244
245	return err;
246}
247EXPORT_SYMBOL_GPL(register_ife_op);
248
249static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
250{
251	int ret = 0;
252	/* XXX: unfortunately cant use nla_policy at this point
253	* because a length of 0 is valid in the case of
254	* "allow". "use" semantics do enforce for proper
255	* length and i couldve use nla_policy but it makes it hard
256	* to use it just for that..
257	*/
258	if (ops->validate)
259		return ops->validate(val, len);
260
261	if (ops->metatype == NLA_U32)
262		ret = ife_validate_meta_u32(val, len);
263	else if (ops->metatype == NLA_U16)
264		ret = ife_validate_meta_u16(val, len);
265
266	return ret;
267}
268
269/* called when adding new meta information
270 * under ife->tcf_lock for existing action
271*/
272static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
273				void *val, int len, bool exists)
274{
275	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
276	int ret = 0;
277
278	if (!ops) {
279		ret = -ENOENT;
280#ifdef CONFIG_MODULES
281		if (exists)
282			spin_unlock_bh(&ife->tcf_lock);
283		rtnl_unlock();
284		request_module("ifemeta%u", metaid);
285		rtnl_lock();
286		if (exists)
287			spin_lock_bh(&ife->tcf_lock);
288		ops = find_ife_oplist(metaid);
289#endif
290	}
291
292	if (ops) {
293		ret = 0;
294		if (len)
295			ret = ife_validate_metatype(ops, val, len);
296
297		module_put(ops->owner);
298	}
299
300	return ret;
301}
302
303/* called when adding new meta information
304 * under ife->tcf_lock for existing action
305*/
306static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
307			int len, bool atomic)
308{
309	struct tcf_meta_info *mi = NULL;
310	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
311	int ret = 0;
312
313	if (!ops)
314		return -ENOENT;
315
316	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
317	if (!mi) {
318		/*put back what find_ife_oplist took */
319		module_put(ops->owner);
320		return -ENOMEM;
321	}
322
323	mi->metaid = metaid;
324	mi->ops = ops;
325	if (len > 0) {
326		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
327		if (ret != 0) {
328			kfree(mi);
329			module_put(ops->owner);
330			return ret;
331		}
332	}
333
334	list_add_tail(&mi->metalist, &ife->metalist);
335
336	return ret;
337}
338
339static int use_all_metadata(struct tcf_ife_info *ife)
340{
341	struct tcf_meta_ops *o;
342	int rc = 0;
343	int installed = 0;
344
345	read_lock(&ife_mod_lock);
346	list_for_each_entry(o, &ifeoplist, list) {
347		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
348		if (rc == 0)
349			installed += 1;
350	}
351	read_unlock(&ife_mod_lock);
352
353	if (installed)
354		return 0;
355	else
356		return -EINVAL;
357}
358
359static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
360{
361	struct tcf_meta_info *e;
362	struct nlattr *nest;
363	unsigned char *b = skb_tail_pointer(skb);
364	int total_encoded = 0;
365
366	/*can only happen on decode */
367	if (list_empty(&ife->metalist))
368		return 0;
369
370	nest = nla_nest_start(skb, TCA_IFE_METALST);
371	if (!nest)
372		goto out_nlmsg_trim;
373
374	list_for_each_entry(e, &ife->metalist, metalist) {
375		if (!e->ops->get(skb, e))
376			total_encoded += 1;
377	}
378
379	if (!total_encoded)
380		goto out_nlmsg_trim;
381
382	nla_nest_end(skb, nest);
383
384	return 0;
385
386out_nlmsg_trim:
387	nlmsg_trim(skb, b);
388	return -1;
389}
390
391/* under ife->tcf_lock */
392static void _tcf_ife_cleanup(struct tc_action *a, int bind)
393{
394	struct tcf_ife_info *ife = to_ife(a);
395	struct tcf_meta_info *e, *n;
396
397	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
398		module_put(e->ops->owner);
399		list_del(&e->metalist);
400		if (e->metaval) {
401			if (e->ops->release)
402				e->ops->release(e);
403			else
404				kfree(e->metaval);
405		}
406		kfree(e);
407	}
408}
409
410static void tcf_ife_cleanup(struct tc_action *a, int bind)
411{
412	struct tcf_ife_info *ife = to_ife(a);
413
414	spin_lock_bh(&ife->tcf_lock);
415	_tcf_ife_cleanup(a, bind);
416	spin_unlock_bh(&ife->tcf_lock);
417}
418
419/* under ife->tcf_lock for existing action */
420static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
421			     bool exists)
422{
423	int len = 0;
424	int rc = 0;
425	int i = 0;
426	void *val;
427
428	for (i = 1; i < max_metacnt; i++) {
429		if (tb[i]) {
430			val = nla_data(tb[i]);
431			len = nla_len(tb[i]);
432
433			rc = load_metaops_and_vet(ife, i, val, len, exists);
434			if (rc != 0)
435				return rc;
436
437			rc = add_metainfo(ife, i, val, len, exists);
438			if (rc)
439				return rc;
440		}
441	}
442
443	return rc;
444}
445
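/* Parse TCA_IFE_PARMS plus TCA_IFE_TYPE/DMAC/SMAC/METALST, create or
 * update the action, then populate its metadata list; when no explicit
 * list is given, every registered metadatum is added via
 * use_all_metadata().
 */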
446static int tcf_ife_init(struct net *net, struct nlattr *nla,
447			struct nlattr *est, struct tc_action **a,
448			int ovr, int bind)
449{
450	struct tc_action_net *tn = net_generic(net, ife_net_id);
451	struct nlattr *tb[TCA_IFE_MAX + 1];
452	struct nlattr *tb2[IFE_META_MAX + 1];
453	struct tcf_ife_info *ife;
454	struct tc_ife *parm;
455	u16 ife_type = 0;
456	u8 *daddr = NULL;
457	u8 *saddr = NULL;
458	bool exists = false;
459	int ret = 0;
460	int err;
461
462	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
463	if (err < 0)
464		return err;
465
466	if (!tb[TCA_IFE_PARMS])
467		return -EINVAL;
468
469	parm = nla_data(tb[TCA_IFE_PARMS]);
470
471	exists = tcf_hash_check(tn, parm->index, a, bind);
472	if (exists && bind)
473		return 0;
474
475	if (parm->flags & IFE_ENCODE) {
476		/* Until we get issued the ethertype, we cant have
477		 * a default..
478		**/
479		if (!tb[TCA_IFE_TYPE]) {
480			if (exists)
481				tcf_hash_release(*a, bind);
482			pr_info("You MUST pass etherype for encoding\n");
483			return -EINVAL;
484		}
485	}
486
487	if (!exists) {
488		ret = tcf_hash_create(tn, parm->index, est, a, &act_ife_ops,
489				      bind, false);
490		if (ret)
491			return ret;
492		ret = ACT_P_CREATED;
493	} else {
494		tcf_hash_release(*a, bind);
495		if (!ovr)
496			return -EEXIST;
497	}
498
499	ife = to_ife(*a);
500	ife->flags = parm->flags;
501
502	if (parm->flags & IFE_ENCODE) {
503		ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
504		if (tb[TCA_IFE_DMAC])
505			daddr = nla_data(tb[TCA_IFE_DMAC]);
506		if (tb[TCA_IFE_SMAC])
507			saddr = nla_data(tb[TCA_IFE_SMAC]);
508	}
509
510	if (exists)
511		spin_lock_bh(&ife->tcf_lock);
512	ife->tcf_action = parm->action;
513
514	if (parm->flags & IFE_ENCODE) {
515		if (daddr)
516			ether_addr_copy(ife->eth_dst, daddr);
517		else
518			eth_zero_addr(ife->eth_dst);
519
520		if (saddr)
521			ether_addr_copy(ife->eth_src, saddr);
522		else
523			eth_zero_addr(ife->eth_src);
524
525		ife->eth_type = ife_type;
526	}
527
528	if (ret == ACT_P_CREATED)
529		INIT_LIST_HEAD(&ife->metalist);
530
531	if (tb[TCA_IFE_METALST]) {
532		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
533				       NULL);
534		if (err) {
535metadata_parse_err:
536			if (exists)
537				tcf_hash_release(*a, bind);
538			if (ret == ACT_P_CREATED)
539				_tcf_ife_cleanup(*a, bind);
540
541			if (exists)
542				spin_unlock_bh(&ife->tcf_lock);
543			return err;
544		}
545
546		err = populate_metalist(ife, tb2, exists);
547		if (err)
548			goto metadata_parse_err;
549
550	} else {
551		/* if no passed metadata allow list or passed allow-all
552		 * then here we process by adding as many supported metadatum
553		 * as we can. You better have at least one else we are
554		 * going to bail out
555		 */
556		err = use_all_metadata(ife);
557		if (err) {
558			if (ret == ACT_P_CREATED)
559				_tcf_ife_cleanup(*a, bind);
560
561			if (exists)
562				spin_unlock_bh(&ife->tcf_lock);
563			return err;
564		}
565	}
566
567	if (exists)
568		spin_unlock_bh(&ife->tcf_lock);
569
570	if (ret == ACT_P_CREATED)
571		tcf_hash_insert(tn, *a);
572
573	return ret;
574}
575
576static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
577			int ref)
578{
579	unsigned char *b = skb_tail_pointer(skb);
580	struct tcf_ife_info *ife = to_ife(a);
581	struct tc_ife opt = {
582		.index = ife->tcf_index,
583		.refcnt = ife->tcf_refcnt - ref,
584		.bindcnt = ife->tcf_bindcnt - bind,
585		.action = ife->tcf_action,
586		.flags = ife->flags,
587	};
588	struct tcf_t t;
589
590	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
591		goto nla_put_failure;
592
593	tcf_tm_dump(&t, &ife->tcf_tm);
594	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
595		goto nla_put_failure;
596
597	if (!is_zero_ether_addr(ife->eth_dst)) {
598		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
599			goto nla_put_failure;
600	}
601
602	if (!is_zero_ether_addr(ife->eth_src)) {
603		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
604			goto nla_put_failure;
605	}
606
607	if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
608		goto nla_put_failure;
609
610	if (dump_metalist(skb, ife)) {
611		/*ignore failure to dump metalist */
612		pr_info("Failed to dump metalist\n");
613	}
614
615	return skb->len;
616
617nla_put_failure:
618	nlmsg_trim(skb, b);
619	return -1;
620}
621
622int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
623		       u16 metaid, u16 mlen, void *mdata)
624{
625	struct tcf_meta_info *e;
626
627	/* XXX: use hash to speed up */
628	list_for_each_entry(e, &ife->metalist, metalist) {
629		if (metaid == e->metaid) {
630			if (e->ops) {
631				/* We check for decode presence already */
632				return e->ops->decode(skb, mdata, mlen);
633			}
634		}
635	}
636
637	return 0;
638}
639
640struct ifeheadr {
641	__be16 metalen;
642	u8 tlv_data[];
643};
644
645struct meta_tlvhdr {
646	__be16 type;
647	__be16 len;
648};
649
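/* On-wire layout handled below: a 2-byte total metadata length (which
 * includes the length field itself, IFE_METAHDRLEN) followed by NLA-style
 * TLVs, each a struct meta_tlvhdr plus data padded to a 4-byte boundary;
 * the original ethernet frame starts right after the metadata block.
 */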
650static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
651			  struct tcf_result *res)
652{
653	struct tcf_ife_info *ife = to_ife(a);
654	int action = ife->tcf_action;
655	struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
656	int ifehdrln = (int)ifehdr->metalen;
657	struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
658
659	spin_lock(&ife->tcf_lock);
660	bstats_update(&ife->tcf_bstats, skb);
661	tcf_lastuse_update(&ife->tcf_tm);
662	spin_unlock(&ife->tcf_lock);
663
664	ifehdrln = ntohs(ifehdrln);
665	if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
666		spin_lock(&ife->tcf_lock);
667		ife->tcf_qstats.drops++;
668		spin_unlock(&ife->tcf_lock);
669		return TC_ACT_SHOT;
670	}
671
672	skb_set_mac_header(skb, ifehdrln);
673	__skb_pull(skb, ifehdrln);
674	skb->protocol = eth_type_trans(skb, skb->dev);
675	ifehdrln -= IFE_METAHDRLEN;
676
677	while (ifehdrln > 0) {
678		u8 *tlvdata = (u8 *)tlv;
679		u16 mtype = tlv->type;
680		u16 mlen = tlv->len;
681		u16 alen;
682
683		mtype = ntohs(mtype);
684		mlen = ntohs(mlen);
685		alen = NLA_ALIGN(mlen);
686
687		if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
688				       (void *)(tlvdata + NLA_HDRLEN))) {
689			/* abuse overlimits to count when we receive metadata
690			 * but dont have an ops for it
691			 */
692			pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
693					    mtype, mlen);
694			ife->tcf_qstats.overlimits++;
695		}
696
697		tlvdata += alen;
698		ifehdrln -= alen;
699		tlv = (struct meta_tlvhdr *)tlvdata;
700	}
701
702	skb_reset_network_header(skb);
703	return action;
704}
705
706/*XXX: check if we can do this at install time instead of current
707 * send data path
708**/
709static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
710{
711	struct tcf_meta_info *e, *n;
712	int tot_run_sz = 0, run_sz = 0;
713
714	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
715		if (e->ops->check_presence) {
716			run_sz = e->ops->check_presence(skb, e);
717			tot_run_sz += run_sz;
718		}
719	}
720
721	return tot_run_sz;
722}
723
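/* Encode path: grow headroom, duplicate the original ethernet header in
 * front of the metadata block, write the 2-byte total metadata length,
 * emit one TLV per metadatum, then fix up the outer MAC addresses and
 * set the configured ethertype.
 */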
724static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
725			  struct tcf_result *res)
726{
727	struct tcf_ife_info *ife = to_ife(a);
728	int action = ife->tcf_action;
729	struct ethhdr *oethh;	/* outer ether header */
730	struct ethhdr *iethh;	/* inner eth header */
731	struct tcf_meta_info *e;
732	/*
733	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
734	   where ORIGDATA = original ethernet header ...
735	 */
736	u16 metalen = ife_get_sz(skb, ife);
737	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
738	unsigned int skboff = skb->dev->hard_header_len;
739	u32 at = G_TC_AT(skb->tc_verd);
740	int new_len = skb->len + hdrm;
741	bool exceed_mtu = false;
742	int err;
743
744	if (at & AT_EGRESS) {
745		if (new_len > skb->dev->mtu)
746			exceed_mtu = true;
747	}
748
749	spin_lock(&ife->tcf_lock);
750	bstats_update(&ife->tcf_bstats, skb);
751	tcf_lastuse_update(&ife->tcf_tm);
752
753	if (!metalen) {		/* no metadata to send */
754		/* abuse overlimits to count when we allow packet
755		 * with no metadata
756		 */
757		ife->tcf_qstats.overlimits++;
758		spin_unlock(&ife->tcf_lock);
759		return action;
760	}
761	/* could be stupid policy setup or mtu config
762	 * so lets be conservative.. */
763	if ((action == TC_ACT_SHOT) || exceed_mtu) {
764		ife->tcf_qstats.drops++;
765		spin_unlock(&ife->tcf_lock);
766		return TC_ACT_SHOT;
767	}
768
769	err = skb_cow_head(skb, hdrm);
770	if (unlikely(err)) {
771		ife->tcf_qstats.drops++;
772		spin_unlock(&ife->tcf_lock);
773		return TC_ACT_SHOT;
774	}
775
776	if (!(at & AT_EGRESS))
777		skb_push(skb, skb->dev->hard_header_len);
778
779	iethh = (struct ethhdr *)skb->data;
780	__skb_push(skb, hdrm);
781	memcpy(skb->data, iethh, skb->mac_len);
782	skb_reset_mac_header(skb);
783	oethh = eth_hdr(skb);
784
785	/*total metadata length */
786	metalen += IFE_METAHDRLEN;
787	metalen = htons(metalen);
788	memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
789	skboff += IFE_METAHDRLEN;
790
791	/* XXX: we dont have a clever way of telling encode to
792	 * not repeat some of the computations that are done by
793	 * ops->presence_check...
794	 */
795	list_for_each_entry(e, &ife->metalist, metalist) {
796		if (e->ops->encode) {
797			err = e->ops->encode(skb, (void *)(skb->data + skboff),
798					     e);
799		}
800		if (err < 0) {
801			/* too corrupt to keep around if overwritten */
802			ife->tcf_qstats.drops++;
803			spin_unlock(&ife->tcf_lock);
804			return TC_ACT_SHOT;
805		}
806		skboff += err;
807	}
808
809	if (!is_zero_ether_addr(ife->eth_src))
810		ether_addr_copy(oethh->h_source, ife->eth_src);
811	else
812		ether_addr_copy(oethh->h_source, iethh->h_source);
813	if (!is_zero_ether_addr(ife->eth_dst))
814		ether_addr_copy(oethh->h_dest, ife->eth_dst);
815	else
816		ether_addr_copy(oethh->h_dest, iethh->h_dest);
817	oethh->h_proto = htons(ife->eth_type);
818
819	if (!(at & AT_EGRESS))
820		skb_pull(skb, skb->dev->hard_header_len);
821
822	spin_unlock(&ife->tcf_lock);
823
824	return action;
825}
826
827static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
828		       struct tcf_result *res)
829{
830	struct tcf_ife_info *ife = to_ife(a);
831
832	if (ife->flags & IFE_ENCODE)
833		return tcf_ife_encode(skb, a, res);
834
835	if (!(ife->flags & IFE_ENCODE))
836		return tcf_ife_decode(skb, a, res);
837
838	pr_info_ratelimited("unknown failure(policy neither de/encode\n");
839	spin_lock(&ife->tcf_lock);
840	bstats_update(&ife->tcf_bstats, skb);
841	tcf_lastuse_update(&ife->tcf_tm);
842	ife->tcf_qstats.drops++;
843	spin_unlock(&ife->tcf_lock);
844
845	return TC_ACT_SHOT;
846}
847
848static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
849			  struct netlink_callback *cb, int type,
850			  const struct tc_action_ops *ops)
851{
852	struct tc_action_net *tn = net_generic(net, ife_net_id);
853
854	return tcf_generic_walker(tn, skb, cb, type, ops);
855}
856
857static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
858{
859	struct tc_action_net *tn = net_generic(net, ife_net_id);
860
861	return tcf_hash_search(tn, a, index);
862}
863
864static struct tc_action_ops act_ife_ops = {
865	.kind = "ife",
866	.type = TCA_ACT_IFE,
867	.owner = THIS_MODULE,
868	.act = tcf_ife_act,
869	.dump = tcf_ife_dump,
870	.cleanup = tcf_ife_cleanup,
871	.init = tcf_ife_init,
872	.walk = tcf_ife_walker,
873	.lookup = tcf_ife_search,
874	.size =	sizeof(struct tcf_ife_info),
875};
876
877static __net_init int ife_init_net(struct net *net)
878{
879	struct tc_action_net *tn = net_generic(net, ife_net_id);
880
881	return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
882}
883
884static void __net_exit ife_exit_net(struct net *net)
885{
886	struct tc_action_net *tn = net_generic(net, ife_net_id);
887
888	tc_action_net_exit(tn);
889}
890
891static struct pernet_operations ife_net_ops = {
892	.init = ife_init_net,
893	.exit = ife_exit_net,
894	.id   = &ife_net_id,
895	.size = sizeof(struct tc_action_net),
896};
897
898static int __init ife_init_module(void)
899{
900	return tcf_register_action(&act_ife_ops, &ife_net_ops);
901}
902
903static void __exit ife_cleanup_module(void)
904{
905	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
906}
907
908module_init(ife_init_module);
909module_exit(ife_cleanup_module);
910
911MODULE_AUTHOR("Jamal Hadi Salim(2015)");
912MODULE_DESCRIPTION("Inter-FE LFB action");
913MODULE_LICENSE("GPL");
v6.13.7: the IFE (Inter-FE) action
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
  4 *
  5 *		Refer to:
  6 *		draft-ietf-forces-interfelfb-03
  7 *		and
  8 *		netdev01 paper:
  9 *		"Distributing Linux Traffic Control Classifier-Action
 10 *		Subsystem"
 11 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 12 *
 13 * copyright Jamal Hadi Salim (2015)
 14*/
 15
 16#include <linux/types.h>
 17#include <linux/kernel.h>
 18#include <linux/string.h>
 19#include <linux/errno.h>
 20#include <linux/skbuff.h>
 21#include <linux/rtnetlink.h>
 22#include <linux/module.h>
 23#include <linux/init.h>
 24#include <net/net_namespace.h>
 25#include <net/netlink.h>
 26#include <net/pkt_sched.h>
 27#include <net/pkt_cls.h>
 28#include <uapi/linux/tc_act/tc_ife.h>
 29#include <net/tc_act/tc_ife.h>
 30#include <linux/etherdevice.h>
 31#include <net/ife.h>
 32#include <net/tc_wrapper.h>
 33
 34static int max_metacnt = IFE_META_MAX + 1;
 35static struct tc_action_ops act_ife_ops;
 36
 37static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
 38	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
 39	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
 40	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
 41	[TCA_IFE_TYPE] = { .type = NLA_U16},
 42};
 43
 44int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
 45{
 46	u16 edata = 0;
 47
 48	if (mi->metaval)
 49		edata = *(u16 *)mi->metaval;
 50	else if (metaval)
 51		edata = metaval;
 52
 53	if (!edata) /* will not encode */
 54		return 0;
 55
 56	edata = htons(edata);
 57	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
 58}
 59EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
 60
 61int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
 62{
 63	if (mi->metaval)
 64		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
 65	else
 66		return nla_put(skb, mi->metaid, 0, NULL);
 67}
 68EXPORT_SYMBOL_GPL(ife_get_meta_u32);
 69
 70int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
 71{
 72	if (metaval || mi->metaval)
 73		return 8; /* T+L+V == 2+2+4 */
 74
 75	return 0;
 76}
 77EXPORT_SYMBOL_GPL(ife_check_meta_u32);
 78
 79int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
 80{
 81	if (metaval || mi->metaval)
 82		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
 83
 84	return 0;
 85}
 86EXPORT_SYMBOL_GPL(ife_check_meta_u16);
 87
 88int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
 89{
 90	u32 edata = metaval;
 91
 92	if (mi->metaval)
 93		edata = *(u32 *)mi->metaval;
 94	else if (metaval)
 95		edata = metaval;
 96
 97	if (!edata) /* will not encode */
 98		return 0;
 99
100	edata = htonl(edata);
101	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
102}
103EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
104
105int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
106{
107	if (mi->metaval)
108		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
109	else
110		return nla_put(skb, mi->metaid, 0, NULL);
111}
112EXPORT_SYMBOL_GPL(ife_get_meta_u16);
113
114int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
115{
116	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
117	if (!mi->metaval)
118		return -ENOMEM;
119
120	return 0;
121}
122EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
123
124int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
125{
126	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
127	if (!mi->metaval)
128		return -ENOMEM;
129
130	return 0;
131}
132EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
133
134void ife_release_meta_gen(struct tcf_meta_info *mi)
135{
136	kfree(mi->metaval);
137}
138EXPORT_SYMBOL_GPL(ife_release_meta_gen);
139
140int ife_validate_meta_u32(void *val, int len)
141{
142	if (len == sizeof(u32))
143		return 0;
144
145	return -EINVAL;
146}
147EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
148
149int ife_validate_meta_u16(void *val, int len)
150{
151	/* length will not include padding */
152	if (len == sizeof(u16))
153		return 0;
154
155	return -EINVAL;
156}
157EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
158
159static LIST_HEAD(ifeoplist);
160static DEFINE_RWLOCK(ife_mod_lock);
161
162static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
163{
164	struct tcf_meta_ops *o;
165
166	read_lock(&ife_mod_lock);
167	list_for_each_entry(o, &ifeoplist, list) {
168		if (o->metaid == metaid) {
169			if (!try_module_get(o->owner))
170				o = NULL;
171			read_unlock(&ife_mod_lock);
172			return o;
173		}
174	}
175	read_unlock(&ife_mod_lock);
176
177	return NULL;
178}
179
180int register_ife_op(struct tcf_meta_ops *mops)
181{
182	struct tcf_meta_ops *m;
183
184	if (!mops->metaid || !mops->metatype || !mops->name ||
185	    !mops->check_presence || !mops->encode || !mops->decode ||
186	    !mops->get || !mops->alloc)
187		return -EINVAL;
188
189	write_lock(&ife_mod_lock);
190
191	list_for_each_entry(m, &ifeoplist, list) {
192		if (m->metaid == mops->metaid ||
193		    (strcmp(mops->name, m->name) == 0)) {
194			write_unlock(&ife_mod_lock);
195			return -EEXIST;
196		}
197	}
198
199	if (!mops->release)
200		mops->release = ife_release_meta_gen;
201
202	list_add_tail(&mops->list, &ifeoplist);
203	write_unlock(&ife_mod_lock);
204	return 0;
205}
206EXPORT_SYMBOL_GPL(unregister_ife_op);
207
208int unregister_ife_op(struct tcf_meta_ops *mops)
209{
210	struct tcf_meta_ops *m;
211	int err = -ENOENT;
212
213	write_lock(&ife_mod_lock);
214	list_for_each_entry(m, &ifeoplist, list) {
215		if (m->metaid == mops->metaid) {
216			list_del(&mops->list);
217			err = 0;
218			break;
219		}
220	}
221	write_unlock(&ife_mod_lock);
222
223	return err;
224}
225EXPORT_SYMBOL_GPL(register_ife_op);
226
227static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
228{
229	int ret = 0;
230	/* XXX: unfortunately cant use nla_policy at this point
231	* because a length of 0 is valid in the case of
232	* "allow". "use" semantics do enforce for proper
233	* length and i couldve use nla_policy but it makes it hard
234	* to use it just for that..
235	*/
236	if (ops->validate)
237		return ops->validate(val, len);
238
239	if (ops->metatype == NLA_U32)
240		ret = ife_validate_meta_u32(val, len);
241	else if (ops->metatype == NLA_U16)
242		ret = ife_validate_meta_u16(val, len);
243
244	return ret;
245}
246
247#ifdef CONFIG_MODULES
248static const char *ife_meta_id2name(u32 metaid)
249{
250	switch (metaid) {
251	case IFE_META_SKBMARK:
252		return "skbmark";
253	case IFE_META_PRIO:
254		return "skbprio";
255	case IFE_META_TCINDEX:
256		return "tcindex";
257	default:
258		return "unknown";
259	}
260}
261#endif
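/* Metadata modules are auto-loaded by name: request_module() below asks
 * for "ife-meta-skbmark", "ife-meta-skbprio" or "ife-meta-tcindex",
 * depending on the metaid being added.
 */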
262
263/* called when adding new meta information
264*/
265static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
266{
267	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
268	int ret = 0;
269
270	if (!ops) {
271		ret = -ENOENT;
272#ifdef CONFIG_MODULES
273		if (rtnl_held)
274			rtnl_unlock();
275		request_module("ife-meta-%s", ife_meta_id2name(metaid));
276		if (rtnl_held)
277			rtnl_lock();
278		ops = find_ife_oplist(metaid);
279#endif
280	}
281
282	if (ops) {
283		ret = 0;
284		if (len)
285			ret = ife_validate_metatype(ops, val, len);
286
287		module_put(ops->owner);
288	}
289
290	return ret;
291}
292
293/* called when adding new meta information
294*/
295static int __add_metainfo(const struct tcf_meta_ops *ops,
296			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
297			  int len, bool atomic, bool exists)
298{
299	struct tcf_meta_info *mi = NULL;
300	int ret = 0;
301
302	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
303	if (!mi)
304		return -ENOMEM;
305
306	mi->metaid = metaid;
307	mi->ops = ops;
308	if (len > 0) {
309		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
310		if (ret != 0) {
311			kfree(mi);
312			return ret;
313		}
314	}
315
316	if (exists)
317		spin_lock_bh(&ife->tcf_lock);
318	list_add_tail(&mi->metalist, &ife->metalist);
319	if (exists)
320		spin_unlock_bh(&ife->tcf_lock);
321
322	return ret;
323}
324
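/* Used while walking ifeoplist under ife_mod_lock: the ops are already
 * known, so take the module reference directly instead of going through
 * find_ife_oplist().
 */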
325static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
326				    struct tcf_ife_info *ife, u32 metaid,
327				    bool exists)
328{
329	int ret;
330
331	if (!try_module_get(ops->owner))
332		return -ENOENT;
333	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
334	if (ret)
335		module_put(ops->owner);
336	return ret;
337}
338
339static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
340			int len, bool exists)
341{
342	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
343	int ret;
344
345	if (!ops)
346		return -ENOENT;
347	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
348	if (ret)
349		/*put back what find_ife_oplist took */
350		module_put(ops->owner);
351	return ret;
352}
353
354static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
355{
356	struct tcf_meta_ops *o;
357	int rc = 0;
358	int installed = 0;
359
360	read_lock(&ife_mod_lock);
361	list_for_each_entry(o, &ifeoplist, list) {
362		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
363		if (rc == 0)
364			installed += 1;
365	}
366	read_unlock(&ife_mod_lock);
367
368	if (installed)
369		return 0;
370	else
371		return -EINVAL;
372}
373
374static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
375{
376	struct tcf_meta_info *e;
377	struct nlattr *nest;
378	unsigned char *b = skb_tail_pointer(skb);
379	int total_encoded = 0;
380
381	/*can only happen on decode */
382	if (list_empty(&ife->metalist))
383		return 0;
384
385	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
386	if (!nest)
387		goto out_nlmsg_trim;
388
389	list_for_each_entry(e, &ife->metalist, metalist) {
390		if (!e->ops->get(skb, e))
391			total_encoded += 1;
392	}
393
394	if (!total_encoded)
395		goto out_nlmsg_trim;
396
397	nla_nest_end(skb, nest);
398
399	return 0;
400
401out_nlmsg_trim:
402	nlmsg_trim(skb, b);
403	return -1;
404}
405
406/* under ife->tcf_lock */
407static void _tcf_ife_cleanup(struct tc_action *a)
408{
409	struct tcf_ife_info *ife = to_ife(a);
410	struct tcf_meta_info *e, *n;
411
412	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
413		list_del(&e->metalist);
414		if (e->metaval) {
415			if (e->ops->release)
416				e->ops->release(e);
417			else
418				kfree(e->metaval);
419		}
420		module_put(e->ops->owner);
421		kfree(e);
422	}
423}
424
425static void tcf_ife_cleanup(struct tc_action *a)
426{
427	struct tcf_ife_info *ife = to_ife(a);
428	struct tcf_ife_params *p;
429
430	spin_lock_bh(&ife->tcf_lock);
431	_tcf_ife_cleanup(a);
432	spin_unlock_bh(&ife->tcf_lock);
433
434	p = rcu_dereference_protected(ife->params, 1);
435	if (p)
436		kfree_rcu(p, rcu);
437}
438
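/* Vet all requested metadata (and load its modules) before the action is
 * created, so that load_metaops_and_vet() can drop and retake the rtnl
 * lock for request_module() with no action or tcf_lock held.
 */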
439static int load_metalist(struct nlattr **tb, bool rtnl_held)
440{
441	int i;
442
443	for (i = 1; i < max_metacnt; i++) {
444		if (tb[i]) {
445			void *val = nla_data(tb[i]);
446			int len = nla_len(tb[i]);
447			int rc;
448
449			rc = load_metaops_and_vet(i, val, len, rtnl_held);
450			if (rc != 0)
451				return rc;
452		}
453	}
454
455	return 0;
456}
457
458static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
459			     bool exists, bool rtnl_held)
460{
461	int len = 0;
462	int rc = 0;
463	int i = 0;
464	void *val;
465
466	for (i = 1; i < max_metacnt; i++) {
467		if (tb[i]) {
468			val = nla_data(tb[i]);
469			len = nla_len(tb[i]);
470
471			rc = add_metainfo(ife, i, val, len, exists);
472			if (rc)
473				return rc;
474		}
475	}
476
477	return rc;
478}
479
480static int tcf_ife_init(struct net *net, struct nlattr *nla,
481			struct nlattr *est, struct tc_action **a,
482			struct tcf_proto *tp, u32 flags,
483			struct netlink_ext_ack *extack)
484{
485	struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);
486	bool bind = flags & TCA_ACT_FLAGS_BIND;
487	struct nlattr *tb[TCA_IFE_MAX + 1];
488	struct nlattr *tb2[IFE_META_MAX + 1];
489	struct tcf_chain *goto_ch = NULL;
490	struct tcf_ife_params *p;
491	struct tcf_ife_info *ife;
492	u16 ife_type = ETH_P_IFE;
493	struct tc_ife *parm;
494	u8 *daddr = NULL;
495	u8 *saddr = NULL;
496	bool exists = false;
497	int ret = 0;
498	u32 index;
499	int err;
500
501	if (!nla) {
502		NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
503		return -EINVAL;
504	}
505
506	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
507					  NULL);
508	if (err < 0)
509		return err;
510
511	if (!tb[TCA_IFE_PARMS])
512		return -EINVAL;
513
514	parm = nla_data(tb[TCA_IFE_PARMS]);
515
516	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
517	 * they cannot run as the same time. Check on all other values which
518	 * are not supported right now.
519	 */
520	if (parm->flags & ~IFE_ENCODE)
521		return -EINVAL;
522
523	p = kzalloc(sizeof(*p), GFP_KERNEL);
524	if (!p)
525		return -ENOMEM;
526
527	if (tb[TCA_IFE_METALST]) {
528		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
529						  tb[TCA_IFE_METALST], NULL,
530						  NULL);
531		if (err) {
532			kfree(p);
533			return err;
534		}
535		err = load_metalist(tb2, !(flags & TCA_ACT_FLAGS_NO_RTNL));
536		if (err) {
537			kfree(p);
538			return err;
539		}
540	}
541
542	index = parm->index;
543	err = tcf_idr_check_alloc(tn, &index, a, bind);
544	if (err < 0) {
545		kfree(p);
546		return err;
547	}
548	exists = err;
549	if (exists && bind) {
550		kfree(p);
551		return ACT_P_BOUND;
552	}
553
554	if (!exists) {
555		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
556				     bind, true, flags);
557		if (ret) {
558			tcf_idr_cleanup(tn, index);
559			kfree(p);
560			return ret;
561		}
562		ret = ACT_P_CREATED;
563	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
564		tcf_idr_release(*a, bind);
565		kfree(p);
566		return -EEXIST;
567	}
568
569	ife = to_ife(*a);
570	if (ret == ACT_P_CREATED)
571		INIT_LIST_HEAD(&ife->metalist);
572
573	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
574	if (err < 0)
575		goto release_idr;
576
577	p->flags = parm->flags;
578
579	if (parm->flags & IFE_ENCODE) {
580		if (tb[TCA_IFE_TYPE])
581			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
582		if (tb[TCA_IFE_DMAC])
583			daddr = nla_data(tb[TCA_IFE_DMAC]);
584		if (tb[TCA_IFE_SMAC])
585			saddr = nla_data(tb[TCA_IFE_SMAC]);
586	}
587
588	if (parm->flags & IFE_ENCODE) {
589		if (daddr)
590			ether_addr_copy(p->eth_dst, daddr);
591		else
592			eth_zero_addr(p->eth_dst);
593
594		if (saddr)
595			ether_addr_copy(p->eth_src, saddr);
596		else
597			eth_zero_addr(p->eth_src);
598
599		p->eth_type = ife_type;
600	}
601
602	if (tb[TCA_IFE_METALST]) {
603		err = populate_metalist(ife, tb2, exists,
604					!(flags & TCA_ACT_FLAGS_NO_RTNL));
605		if (err)
606			goto metadata_parse_err;
607	} else {
608		/* if no passed metadata allow list or passed allow-all
609		 * then here we process by adding as many supported metadatum
610		 * as we can. You better have at least one else we are
611		 * going to bail out
612		 */
613		err = use_all_metadata(ife, exists);
614		if (err)
615			goto metadata_parse_err;
616	}
617
618	if (exists)
619		spin_lock_bh(&ife->tcf_lock);
620	/* protected by tcf_lock when modifying existing action */
621	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
622	p = rcu_replace_pointer(ife->params, p, 1);
623
624	if (exists)
625		spin_unlock_bh(&ife->tcf_lock);
626	if (goto_ch)
627		tcf_chain_put_by_act(goto_ch);
628	if (p)
629		kfree_rcu(p, rcu);
630
631	return ret;
632metadata_parse_err:
633	if (goto_ch)
634		tcf_chain_put_by_act(goto_ch);
635release_idr:
636	kfree(p);
637	tcf_idr_release(*a, bind);
638	return err;
639}
640
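/* The parameters live in an RCU-managed struct tcf_ife_params; dump reads
 * them via rcu_dereference_protected() while holding tcf_lock.
 */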
641static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
642			int ref)
643{
644	unsigned char *b = skb_tail_pointer(skb);
645	struct tcf_ife_info *ife = to_ife(a);
646	struct tcf_ife_params *p;
647	struct tc_ife opt = {
648		.index = ife->tcf_index,
649		.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
650		.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
651	};
652	struct tcf_t t;
653
654	spin_lock_bh(&ife->tcf_lock);
655	opt.action = ife->tcf_action;
656	p = rcu_dereference_protected(ife->params,
657				      lockdep_is_held(&ife->tcf_lock));
658	opt.flags = p->flags;
659
660	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
661		goto nla_put_failure;
662
663	tcf_tm_dump(&t, &ife->tcf_tm);
664	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
665		goto nla_put_failure;
666
667	if (!is_zero_ether_addr(p->eth_dst)) {
668		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
669			goto nla_put_failure;
670	}
671
672	if (!is_zero_ether_addr(p->eth_src)) {
673		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
674			goto nla_put_failure;
675	}
676
677	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
678		goto nla_put_failure;
679
680	if (dump_metalist(skb, ife)) {
681		/*ignore failure to dump metalist */
682		pr_info("Failed to dump metalist\n");
683	}
684
685	spin_unlock_bh(&ife->tcf_lock);
686	return skb->len;
687
688nla_put_failure:
689	spin_unlock_bh(&ife->tcf_lock);
690	nlmsg_trim(skb, b);
691	return -1;
692}
693
694static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
695			      u16 metaid, u16 mlen, void *mdata)
696{
697	struct tcf_meta_info *e;
698
699	/* XXX: use hash to speed up */
700	list_for_each_entry(e, &ife->metalist, metalist) {
701		if (metaid == e->metaid) {
702			if (e->ops) {
703				/* We check for decode presence already */
704				return e->ops->decode(skb, mdata, mlen);
705			}
706		}
707	}
708
709	return -ENOENT;
710}
711
712static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
713			  struct tcf_result *res)
714{
715	struct tcf_ife_info *ife = to_ife(a);
716	int action = ife->tcf_action;
717	u8 *ifehdr_end;
718	u8 *tlv_data;
719	u16 metalen;
720
721	bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
722	tcf_lastuse_update(&ife->tcf_tm);
723
724	if (skb_at_tc_ingress(skb))
725		skb_push(skb, skb->dev->hard_header_len);
726
727	tlv_data = ife_decode(skb, &metalen);
728	if (unlikely(!tlv_data)) {
729		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
730		return TC_ACT_SHOT;
731	}
732
733	ifehdr_end = tlv_data + metalen;
734	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
735		u8 *curr_data;
736		u16 mtype;
737		u16 dlen;
738
739		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
740						&dlen, NULL);
741		if (!curr_data) {
742			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
743			return TC_ACT_SHOT;
744		}
745
746		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
747			/* abuse overlimits to count when we receive metadata
748			 * but dont have an ops for it
749			 */
750			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
751					    mtype, dlen);
752			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
753		}
754	}
755
756	if (WARN_ON(tlv_data != ifehdr_end)) {
757		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
758		return TC_ACT_SHOT;
759	}
760
761	skb->protocol = eth_type_trans(skb, skb->dev);
762	skb_reset_network_header(skb);
763
764	return action;
765}
766
767/*XXX: check if we can do this at install time instead of current
768 * send data path
769**/
770static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
771{
772	struct tcf_meta_info *e, *n;
773	int tot_run_sz = 0, run_sz = 0;
774
775	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
776		if (e->ops->check_presence) {
777			run_sz = e->ops->check_presence(skb, e);
778			tot_run_sz += run_sz;
779		}
780	}
781
782	return tot_run_sz;
783}
784
785static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
786			  struct tcf_result *res, struct tcf_ife_params *p)
787{
788	struct tcf_ife_info *ife = to_ife(a);
789	int action = ife->tcf_action;
790	struct ethhdr *oethh;	/* outer ether header */
791	struct tcf_meta_info *e;
792	/*
793	   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
794	   where ORIGDATA = original ethernet header ...
795	 */
796	u16 metalen = ife_get_sz(skb, ife);
797	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
798	unsigned int skboff = 0;
799	int new_len = skb->len + hdrm;
800	bool exceed_mtu = false;
801	void *ife_meta;
802	int err = 0;
803
804	if (!skb_at_tc_ingress(skb)) {
805		if (new_len > skb->dev->mtu)
806			exceed_mtu = true;
807	}
808
809	bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
810	tcf_lastuse_update(&ife->tcf_tm);
811
812	if (!metalen) {		/* no metadata to send */
813		/* abuse overlimits to count when we allow packet
814		 * with no metadata
815		 */
816		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
817		return action;
818	}
819	/* could be stupid policy setup or mtu config
820	 * so lets be conservative.. */
821	if ((action == TC_ACT_SHOT) || exceed_mtu) {
822		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
823		return TC_ACT_SHOT;
824	}
825
826	if (skb_at_tc_ingress(skb))
827		skb_push(skb, skb->dev->hard_header_len);
828
829	ife_meta = ife_encode(skb, metalen);
830
831	spin_lock(&ife->tcf_lock);
832
833	/* XXX: we dont have a clever way of telling encode to
834	 * not repeat some of the computations that are done by
835	 * ops->presence_check...
836	 */
837	list_for_each_entry(e, &ife->metalist, metalist) {
838		if (e->ops->encode) {
839			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
840					     e);
841		}
842		if (err < 0) {
843			/* too corrupt to keep around if overwritten */
844			spin_unlock(&ife->tcf_lock);
845			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
846			return TC_ACT_SHOT;
847		}
848		skboff += err;
849	}
850	spin_unlock(&ife->tcf_lock);
851	oethh = (struct ethhdr *)skb->data;
852
853	if (!is_zero_ether_addr(p->eth_src))
854		ether_addr_copy(oethh->h_source, p->eth_src);
855	if (!is_zero_ether_addr(p->eth_dst))
856		ether_addr_copy(oethh->h_dest, p->eth_dst);
857	oethh->h_proto = htons(p->eth_type);
858
859	if (skb_at_tc_ingress(skb))
860		skb_pull(skb, skb->dev->hard_header_len);
861
862	return action;
863}
864
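/* Fast path: read the parameters under RCU (BH) and dispatch to the
 * encode or decode handler based on the IFE_ENCODE flag.
 */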
865TC_INDIRECT_SCOPE int tcf_ife_act(struct sk_buff *skb,
866				  const struct tc_action *a,
867				  struct tcf_result *res)
868{
869	struct tcf_ife_info *ife = to_ife(a);
870	struct tcf_ife_params *p;
871	int ret;
872
873	p = rcu_dereference_bh(ife->params);
874	if (p->flags & IFE_ENCODE) {
875		ret = tcf_ife_encode(skb, a, res, p);
876		return ret;
877	}
878
879	return tcf_ife_decode(skb, a, res);
880}
881
882static struct tc_action_ops act_ife_ops = {
883	.kind = "ife",
884	.id = TCA_ID_IFE,
885	.owner = THIS_MODULE,
886	.act = tcf_ife_act,
887	.dump = tcf_ife_dump,
888	.cleanup = tcf_ife_cleanup,
889	.init = tcf_ife_init,
890	.size =	sizeof(struct tcf_ife_info),
891};
892MODULE_ALIAS_NET_ACT("ife");
893
894static __net_init int ife_init_net(struct net *net)
895{
896	struct tc_action_net *tn = net_generic(net, act_ife_ops.net_id);
897
898	return tc_action_net_init(net, tn, &act_ife_ops);
899}
900
901static void __net_exit ife_exit_net(struct list_head *net_list)
902{
903	tc_action_net_exit(net_list, act_ife_ops.net_id);
904}
905
906static struct pernet_operations ife_net_ops = {
907	.init = ife_init_net,
908	.exit_batch = ife_exit_net,
909	.id   = &act_ife_ops.net_id,
910	.size = sizeof(struct tc_action_net),
911};
912
913static int __init ife_init_module(void)
914{
915	return tcf_register_action(&act_ife_ops, &ife_net_ops);
916}
917
918static void __exit ife_cleanup_module(void)
919{
920	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
921}
922
923module_init(ife_init_module);
924module_exit(ife_cleanup_module);
925
926MODULE_AUTHOR("Jamal Hadi Salim(2015)");
927MODULE_DESCRIPTION("Inter-FE LFB action");
928MODULE_LICENSE("GPL");