Linux Audio

Check our new training course

Loading...
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#include <linux/kernel.h>
  3#include <linux/init.h>
  4#include <linux/module.h>
  5#include <linux/seqlock.h>
  6#include <linux/netlink.h>
  7#include <linux/netfilter.h>
  8#include <linux/netfilter/nf_tables.h>
  9#include <net/netfilter/nf_tables.h>
 10#include <net/dst_metadata.h>
 11#include <net/ip_tunnels.h>
 12#include <net/vxlan.h>
 13#include <net/erspan.h>
 14#include <net/geneve.h>
 15
/* Per-expression private data for the "tunnel" match expression.
 * key/mode are stored as 8-bit bitfields to keep the expression small.
 */
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;	/* which tunnel property to load (PATH/ID) */
	u8			dreg;	/* destination register index */
	enum nft_tunnel_mode	mode:8;	/* direction filter: NONE/RX/TX */
};
 21
/* Load the configured tunnel property from the packet's tunnel metadata
 * into the destination register.
 *
 * NFT_TUNNEL_PATH stores a boolean (tunnel metadata present and matching
 * the direction mode); NFT_TUNNEL_ID stores the 32-bit tunnel id, or sets
 * NFT_BREAK when no metadata is attached or the mode does not match.
 */
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			/* no tunnel metadata: "tunnel path" is false */
			nft_reg_store8(dest, false);
			return;
		}
		/* MODE_NONE matches either direction; MODE_RX requires rx
		 * metadata (IP_TUNNEL_INFO_TX clear), MODE_TX requires tx
		 * metadata (IP_TUNNEL_INFO_TX set).
		 */
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		/* same direction filtering as NFT_TUNNEL_PATH above */
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);	/* init() rejects unknown keys; should be unreachable */
		regs->verdict.code = NFT_BREAK;
	}
}
 66
/* Netlink attribute policy for the tunnel expression. */
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= { .type = NLA_U32 },
};

/* Parse NFTA_TUNNEL_* attributes: select the key (PATH -> 1-byte result,
 * ID -> 4-byte result), the optional direction mode (defaults to NONE),
 * and register the destination register with the matching data length.
 */
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);	/* boolean */
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);	/* 32-bit tunnel id */
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}
107
108static int nft_tunnel_get_dump(struct sk_buff *skb,
109			       const struct nft_expr *expr)
110{
111	const struct nft_tunnel *priv = nft_expr_priv(expr);
112
113	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
114		goto nla_put_failure;
115	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
116		goto nla_put_failure;
117	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
118		goto nla_put_failure;
119	return 0;
120
121nla_put_failure:
122	return -1;
123}
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
static struct nft_expr_type nft_tunnel_type;
/* Expression ops: no destroy hook needed (no dynamic state to free). */
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
};

/* Expression type registration for "tunnel". */
static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};
141
/* Encapsulation-specific tunnel options. The union holds at most one
 * option type at a time; 'flags' records which TUNNEL_*_OPT it is and
 * 'len' how many bytes of 'u' are in use.
 */
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];	/* raw geneve TLVs */
	} u;
	u32	len;
	__be16	flags;
};

/* Stateful "tunnel" object: a preallocated metadata dst plus its options. */
struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

/* Policy for the nested NFTA_TUNNEL_KEY_IP attribute (IPv4 endpoints). */
static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};
161
/* Parse IPv4 tunnel endpoints into 'info'. Destination is mandatory,
 * source optional (left zero if absent).
 */
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	/* NOTE(review): this check is redundant — DST was verified above. */
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}
184
/* Policy for the nested NFTA_TUNNEL_KEY_IP6 attribute (IPv6 endpoints). */
static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

/* Parse IPv6 tunnel endpoints and optional flow label into 'info', and
 * mark the tunnel metadata as IPv6. Destination is mandatory.
 */
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}
223
/* Policy for the nested VXLAN options attribute. */
static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

/* Parse VXLAN group-based-policy options; GBP is mandatory. */
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	opts->flags	= TUNNEL_VXLAN_OPT;

	return 0;
}
249
/* Policy for the nested ERSPAN options attribute. */
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

/* Parse ERSPAN options. Version is mandatory: v1 takes an index,
 * v2 takes direction + hardware id. Other versions are rejected.
 */
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		 return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		/* index is carried in network byte order */
		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	opts->flags	= TUNNEL_ERSPAN_OPT;

	return 0;
}
303
/* Policy for one nested GENEVE option; DATA is a raw TLV payload
 * capped at 128 bytes.
 */
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};
309
310static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
311				      struct nft_tunnel_opts *opts)
312{
313	struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
314	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
315	int err, data_len;
316
317	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
318			       nft_tunnel_opts_geneve_policy, NULL);
319	if (err < 0)
320		return err;
321
322	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
323	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
324	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
325		return -EINVAL;
326
327	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
328	data_len = nla_len(attr);
329	if (data_len % 4)
330		return -EINVAL;
331
332	opts->len += sizeof(*opt) + data_len;
333	if (opts->len > IP_TUNNEL_OPTS_MAX)
334		return -EINVAL;
335
336	memcpy(opt->opt_data, nla_data(attr), data_len);
337	opt->length = data_len / 4;
338	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
339	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
340	opts->flags = TUNNEL_GENEVE_OPT;
 
341
342	return 0;
343}
344
/* Policy for NFTA_TUNNEL_KEY_OPTS; strict validation starts at the
 * GENEVE attribute (older attributes keep deprecated parsing).
 */
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};

/* Walk the nested option attributes and dispatch to the per-type
 * parser. Only one option type is allowed per object, except GENEVE
 * which may repeat (multiple TLVs are accumulated).
 */
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	int err, rem, type = 0;	/* type tracks the option kind already seen */
	struct nlattr *nla;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)	/* at most one vxlan option */
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_VXLAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)	/* at most one erspan option */
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_ERSPAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			/* geneve may repeat, but not mix with other types */
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_GENEVE_OPT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}
399
/* Top-level policy for the tunnel object's NFTA_TUNNEL_KEY_* attributes. */
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};
411
/* Create a tunnel object: build an ip_tunnel_info from the netlink
 * attributes and preallocate a metadata dst that the eval path attaches
 * to matching packets. A tunnel id and exactly one of IPv4/IPv6
 * endpoints are mandatory.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	/* tx-side metadata; checksumming on by default, no route caching */
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags	= TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;	/* endpoints are mandatory */
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		/* translate NFT_TUNNEL_F_* user flags into TUNNEL_* bits */
		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;	/* default: maximum TTL */

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	/* allocate room for the encap options right behind tun_info */
	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}
494
/* Attach the object's preallocated tunnel metadata dst to the packet,
 * replacing any existing dst. An extra reference is taken since the
 * same metadata_dst is shared across all matching packets.
 */
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}
506
/* Dump the tunnel endpoints as a nested IP or IP6 attribute, chosen by
 * the metadata's address family. Returns 0 on success, -1 on skb-full
 * (with the partial nest cancelled).
 */
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}
545
546static int nft_tunnel_opts_dump(struct sk_buff *skb,
547				struct nft_tunnel_obj *priv)
548{
549	struct nft_tunnel_opts *opts = &priv->opts;
550	struct nlattr *nest, *inner;
551
552	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
553	if (!nest)
554		return -1;
555
556	if (opts->flags & TUNNEL_VXLAN_OPT) {
557		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
558		if (!inner)
559			goto failure;
560		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
561				 htonl(opts->u.vxlan.gbp)))
562			goto inner_failure;
563		nla_nest_end(skb, inner);
564	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
565		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
566		if (!inner)
567			goto failure;
568		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
569				 htonl(opts->u.erspan.version)))
570			goto inner_failure;
571		switch (opts->u.erspan.version) {
572		case ERSPAN_VERSION:
573			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
574					 opts->u.erspan.u.index))
575				goto inner_failure;
576			break;
577		case ERSPAN_VERSION2:
578			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
579				       get_hwid(&opts->u.erspan.u.md2)) ||
580			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
581				       opts->u.erspan.u.md2.dir))
582				goto inner_failure;
583			break;
584		}
585		nla_nest_end(skb, inner);
586	} else if (opts->flags & TUNNEL_GENEVE_OPT) {
587		struct geneve_opt *opt;
588		int offset = 0;
589
590		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
591		if (!inner)
592			goto failure;
593		while (opts->len > offset) {
594			opt = (struct geneve_opt *)opts->u.data + offset;
595			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
596					 opt->opt_class) ||
597			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
598				       opt->type) ||
599			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
600				    opt->length * 4, opt->opt_data))
601				goto inner_failure;
602			offset += sizeof(*opt) + opt->length * 4;
603		}
604		nla_nest_end(skb, inner);
605	}
606	nla_nest_end(skb, nest);
607	return 0;
608
609inner_failure:
610	nla_nest_cancel(skb, inner);
611failure:
612	nla_nest_cancel(skb, nest);
613	return -1;
614}
615
/* Dump the transport ports (kept in network byte order). */
static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

/* Translate TUNNEL_* bits back into the NFT_TUNNEL_F_* user flags and
 * dump them. Note that ZERO_CSUM_TX is reported when TUNNEL_CSUM is
 * absent, mirroring the inverse mapping done at init time.
 */
static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}
643
/* Dump the whole tunnel object configuration to userspace. */
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}
665
/* Drop the object's reference on the preallocated metadata dst. */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}
673
static struct nft_object_type nft_tunnel_obj_type;
/* Stateful object ops for "tunnel" objects. */
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

/* Object type registration for NFT_OBJECT_TUNNEL. */
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};
691
/* Register the tunnel expression and object types; unwind the
 * expression registration if object registration fails.
 */
static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

/* Unregister in reverse order of registration. */
static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#include <linux/kernel.h>
  3#include <linux/init.h>
  4#include <linux/module.h>
  5#include <linux/seqlock.h>
  6#include <linux/netlink.h>
  7#include <linux/netfilter.h>
  8#include <linux/netfilter/nf_tables.h>
  9#include <net/netfilter/nf_tables.h>
 10#include <net/dst_metadata.h>
 11#include <net/ip_tunnels.h>
 12#include <net/vxlan.h>
 13#include <net/erspan.h>
 14#include <net/geneve.h>
 15
/* Per-expression private data for the "tunnel" match expression.
 * 'len' records the destination data length for register tracking.
 */
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;	/* which tunnel property to load */
	u8			dreg;	/* destination register index */
	enum nft_tunnel_mode	mode:8;	/* direction filter: NONE/RX/TX */
	u8			len;	/* result length (1 or 4 bytes) */
};
 22
/* Load the configured tunnel property (path presence as a boolean, or
 * the 32-bit tunnel id) into the destination register; set NFT_BREAK
 * when metadata is absent or the direction mode does not match.
 */
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			/* no tunnel metadata: "tunnel path" is false */
			nft_reg_store8(dest, false);
			return;
		}
		/* MODE_NONE matches both directions; RX/TX test the
		 * IP_TUNNEL_INFO_TX bit of the attached metadata.
		 */
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);	/* init() rejects unknown keys */
		regs->verdict.code = NFT_BREAK;
	}
}
 67
/* Attribute policy; KEY and MODE are range-checked big-endian u32s so
 * they fit the 8-bit fields in struct nft_tunnel.
 */
static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE]	= NLA_POLICY_MAX(NLA_BE32, 255),
};

/* Parse NFTA_TUNNEL_* attributes, record the result length for the
 * register tracker, and allocate the destination register.
 */
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);	/* boolean */
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);	/* 32-bit tunnel id */
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	priv->len = len;	/* used by the reduce/tracking hook */
	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}
109
/* Dump the expression configuration; 'reset' is part of the dump API
 * but has no per-expression state to reset here.
 */
static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
126
/* Register-tracking hook: return true (expression can be elided) only
 * when the tracked expression for this register is an identical tunnel
 * load and no bitwise operation has touched the register since.
 */
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
				  const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	const struct nft_tunnel *tunnel;

	if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
		/* different producer: record ourselves as the new one */
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->key != tunnel->key ||
	    priv->dreg != tunnel->dreg ||
	    priv->mode != tunnel->mode) {
		/* same expr type but different config: not redundant */
		nft_reg_track_update(track, expr, priv->dreg, priv->len);
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return false;
}
151
static struct nft_expr_type nft_tunnel_type;
/* Expression ops, including the register-tracking reduce hook. */
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
	.reduce		= nft_tunnel_get_reduce,
};

/* Expression type registration; restricted to the netdev family. */
static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};
170
/* Encapsulation-specific tunnel options; 'flags' is a tunnel-flags
 * bitmap (IP_TUNNEL_DECLARE_FLAGS) marking which option kind is set,
 * 'len' how many bytes of 'u' are in use.
 */
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
		u8	data[IP_TUNNEL_OPTS_MAX];	/* raw geneve TLVs */
	} u;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	u32	len;
};

/* Stateful "tunnel" object: a preallocated metadata dst plus options. */
struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

/* Policy for the nested NFTA_TUNNEL_KEY_IP attribute (IPv4 endpoints). */
static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};
190
/* Parse IPv4 tunnel endpoints into 'info'; destination is mandatory. */
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	/* NOTE(review): redundant check — DST was verified above. */
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}
213
/* Policy for the nested NFTA_TUNNEL_KEY_IP6 attribute (IPv6 endpoints). */
static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

/* Parse IPv6 endpoints and optional flow label into 'info', marking the
 * metadata as IPv6. Destination is mandatory.
 */
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}
252
/* Policy for the nested VXLAN options attribute. */
static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

/* Parse VXLAN group-based-policy options; GBP is mandatory. */
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags);

	return 0;
}
279
/* Policy for the nested ERSPAN options attribute. */
static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

/* Parse ERSPAN options: v1 takes an index, v2 direction + hardware id;
 * other versions are rejected.
 */
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		 return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		/* index stays in network byte order */
		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	ip_tunnel_flags_zero(opts->flags);
	__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags);

	return 0;
}
334
/* Policy for one nested GENEVE option; DATA is a raw TLV payload
 * capped at 128 bytes.
 */
static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS]	= { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE]	= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};
340
341static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
342				      struct nft_tunnel_opts *opts)
343{
344	struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
345	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
346	int err, data_len;
347
348	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
349			       nft_tunnel_opts_geneve_policy, NULL);
350	if (err < 0)
351		return err;
352
353	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
354	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
355	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
356		return -EINVAL;
357
358	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
359	data_len = nla_len(attr);
360	if (data_len % 4)
361		return -EINVAL;
362
363	opts->len += sizeof(*opt) + data_len;
364	if (opts->len > IP_TUNNEL_OPTS_MAX)
365		return -EINVAL;
366
367	memcpy(opt->opt_data, nla_data(attr), data_len);
368	opt->length = data_len / 4;
369	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
370	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
371	ip_tunnel_flags_zero(opts->flags);
372	__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags);
373
374	return 0;
375}
376
/* Policy for the outer NFTA_TUNNEL_KEY_OPTS nest.  strict_start_type
 * makes attributes from GENEVE onwards subject to strict validation.
 */
static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC]	= {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE]	= { .type = NLA_NESTED, },
};
384
/* Parse the NFTA_TUNNEL_KEY_OPTS nest.  Only a single option type may
 * be supplied, except GENEVE, where several option nests may be
 * concatenated.  @info is currently unused here.
 */
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *nla;
	int err, rem;
	u32 type = 0;	/* IP_TUNNEL_*_OPT_BIT of the first option seen */

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)	/* vxlan must be the only option */
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_VXLAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)	/* erspan must be the only option */
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_ERSPAN_OPT_BIT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			/* multiple geneve option nests may be appended */
			if (type && type != IP_TUNNEL_GENEVE_OPT_BIT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = IP_TUNNEL_GENEVE_OPT_BIT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}
432
/* Top-level netlink attribute policy for tunnel objects
 * (NFTA_TUNNEL_KEY_*), used by nft_tunnel_obj_init().
 */
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};
444
/* Create a tunnel object: build an ip_tunnel_info TX template from the
 * netlink attributes and wrap it in a preallocated metadata dst, which
 * nft_tunnel_obj_eval() later attaches to matching packets.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	/* checksumming defaults to on; NFT_TUNNEL_F_ZERO_CSUM_TX clears it */
	__set_bit(IP_TUNNEL_KEY_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
	__set_bit(IP_TUNNEL_NOCACHE_BIT, info.key.tun_flags);

	/* exactly one of the IPv4/IPv6 endpoint nests is required */
	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			__clear_bit(IP_TUNNEL_CSUM_BIT, info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			__set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
				  info.key.tun_flags);
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			__set_bit(IP_TUNNEL_SEQ_BIT, info.key.tun_flags);
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	/* TTL defaults to the maximum when not specified */
	info.key.ttl = nla_get_u8_default(tb[NFTA_TUNNEL_KEY_TTL], U8_MAX);

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	/* allocate the metadata dst with room for the parsed options */
	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
				GFP_KERNEL_ACCOUNT);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}
528
/* Attach the object's preallocated tunnel metadata dst to the packet,
 * replacing any existing dst.  Each packet takes its own reference on
 * the shared metadata_dst.
 */
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}
540
541static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
542{
543	struct nlattr *nest;
544
545	if (info->mode & IP_TUNNEL_INFO_IPV6) {
546		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
547		if (!nest)
548			return -1;
549
550		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
551				     &info->key.u.ipv6.src) < 0 ||
552		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
553				     &info->key.u.ipv6.dst) < 0 ||
554		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
555				 info->key.label)) {
556			nla_nest_cancel(skb, nest);
557			return -1;
558		}
559
560		nla_nest_end(skb, nest);
561	} else {
562		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
563		if (!nest)
564			return -1;
565
566		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
567				    info->key.u.ipv4.src) < 0 ||
568		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
569				    info->key.u.ipv4.dst) < 0) {
570			nla_nest_cancel(skb, nest);
571			return -1;
572		}
573
574		nla_nest_end(skb, nest);
575	}
576
577	return 0;
578}
579
580static int nft_tunnel_opts_dump(struct sk_buff *skb,
581				struct nft_tunnel_obj *priv)
582{
583	struct nft_tunnel_opts *opts = &priv->opts;
584	struct nlattr *nest, *inner;
585
586	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
587	if (!nest)
588		return -1;
589
590	if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, opts->flags)) {
591		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
592		if (!inner)
593			goto failure;
594		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
595				 htonl(opts->u.vxlan.gbp)))
596			goto inner_failure;
597		nla_nest_end(skb, inner);
598	} else if (test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, opts->flags)) {
599		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
600		if (!inner)
601			goto failure;
602		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
603				 htonl(opts->u.erspan.version)))
604			goto inner_failure;
605		switch (opts->u.erspan.version) {
606		case ERSPAN_VERSION:
607			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
608					 opts->u.erspan.u.index))
609				goto inner_failure;
610			break;
611		case ERSPAN_VERSION2:
612			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
613				       get_hwid(&opts->u.erspan.u.md2)) ||
614			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
615				       opts->u.erspan.u.md2.dir))
616				goto inner_failure;
617			break;
618		}
619		nla_nest_end(skb, inner);
620	} else if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, opts->flags)) {
621		struct geneve_opt *opt;
622		int offset = 0;
623
624		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
625		if (!inner)
626			goto failure;
627		while (opts->len > offset) {
628			opt = (struct geneve_opt *)opts->u.data + offset;
629			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
630					 opt->opt_class) ||
631			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
632				       opt->type) ||
633			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
634				    opt->length * 4, opt->opt_data))
635				goto inner_failure;
636			offset += sizeof(*opt) + opt->length * 4;
637		}
638		nla_nest_end(skb, inner);
639	}
640	nla_nest_end(skb, nest);
641	return 0;
642
643inner_failure:
644	nla_nest_cancel(skb, inner);
645failure:
646	nla_nest_cancel(skb, nest);
647	return -1;
648}
649
650static int nft_tunnel_ports_dump(struct sk_buff *skb,
651				 struct ip_tunnel_info *info)
652{
653	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
654	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
655		return -1;
656
657	return 0;
658}
659
660static int nft_tunnel_flags_dump(struct sk_buff *skb,
661				 struct ip_tunnel_info *info)
662{
663	u32 flags = 0;
664
665	if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, info->key.tun_flags))
666		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
667	if (!test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags))
668		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
669	if (test_bit(IP_TUNNEL_SEQ_BIT, info->key.tun_flags))
670		flags |= NFT_TUNNEL_F_SEQ_NUMBER;
671
672	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
673		return -1;
674
675	return 0;
676}
677
678static int nft_tunnel_obj_dump(struct sk_buff *skb,
679			       struct nft_object *obj, bool reset)
680{
681	struct nft_tunnel_obj *priv = nft_obj_data(obj);
682	struct ip_tunnel_info *info = &priv->md->u.tun_info;
683
684	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
685			 tunnel_id_to_key32(info->key.tun_id)) ||
686	    nft_tunnel_ip_dump(skb, info) < 0 ||
687	    nft_tunnel_ports_dump(skb, info) < 0 ||
688	    nft_tunnel_flags_dump(skb, info) < 0 ||
689	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
690	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
691	    nft_tunnel_opts_dump(skb, priv) < 0)
692		goto nla_put_failure;
693
694	return 0;
695
696nla_put_failure:
697	return -1;
698}
699
/* Release the metadata dst allocated by nft_tunnel_obj_init(). */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}
707
static struct nft_object_type nft_tunnel_obj_type;
/* Per-object operations for the tunnel object type. */
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};
717
/* Registration record for NFT_OBJECT_TUNNEL, restricted to the netdev
 * family.
 */
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};
726
727static int __init nft_tunnel_module_init(void)
728{
729	int err;
730
731	err = nft_register_expr(&nft_tunnel_type);
732	if (err < 0)
733		return err;
734
735	err = nft_register_obj(&nft_tunnel_obj_type);
736	if (err < 0)
737		nft_unregister_expr(&nft_tunnel_type);
738
739	return err;
740}
741
/* Unregister in reverse order of registration. */
static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}
747
module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

/* Module metadata and aliases for autoloading by expression/object name. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");