/* net/netfilter/nft_tunnel.c */
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>
15
16struct nft_tunnel {
17 enum nft_tunnel_keys key:8;
18 u8 dreg;
19 enum nft_tunnel_mode mode:8;
20};
21
22static void nft_tunnel_get_eval(const struct nft_expr *expr,
23 struct nft_regs *regs,
24 const struct nft_pktinfo *pkt)
25{
26 const struct nft_tunnel *priv = nft_expr_priv(expr);
27 u32 *dest = ®s->data[priv->dreg];
28 struct ip_tunnel_info *tun_info;
29
30 tun_info = skb_tunnel_info(pkt->skb);
31
32 switch (priv->key) {
33 case NFT_TUNNEL_PATH:
34 if (!tun_info) {
35 nft_reg_store8(dest, false);
36 return;
37 }
38 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
39 (priv->mode == NFT_TUNNEL_MODE_RX &&
40 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
41 (priv->mode == NFT_TUNNEL_MODE_TX &&
42 (tun_info->mode & IP_TUNNEL_INFO_TX)))
43 nft_reg_store8(dest, true);
44 else
45 nft_reg_store8(dest, false);
46 break;
47 case NFT_TUNNEL_ID:
48 if (!tun_info) {
49 regs->verdict.code = NFT_BREAK;
50 return;
51 }
52 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
53 (priv->mode == NFT_TUNNEL_MODE_RX &&
54 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
55 (priv->mode == NFT_TUNNEL_MODE_TX &&
56 (tun_info->mode & IP_TUNNEL_INFO_TX)))
57 *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
58 else
59 regs->verdict.code = NFT_BREAK;
60 break;
61 default:
62 WARN_ON(1);
63 regs->verdict.code = NFT_BREAK;
64 }
65}
66
67static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
68 [NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
69 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
70 [NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
71};
72
73static int nft_tunnel_get_init(const struct nft_ctx *ctx,
74 const struct nft_expr *expr,
75 const struct nlattr * const tb[])
76{
77 struct nft_tunnel *priv = nft_expr_priv(expr);
78 u32 len;
79
80 if (!tb[NFTA_TUNNEL_KEY] ||
81 !tb[NFTA_TUNNEL_DREG])
82 return -EINVAL;
83
84 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
85 switch (priv->key) {
86 case NFT_TUNNEL_PATH:
87 len = sizeof(u8);
88 break;
89 case NFT_TUNNEL_ID:
90 len = sizeof(u32);
91 break;
92 default:
93 return -EOPNOTSUPP;
94 }
95
96 if (tb[NFTA_TUNNEL_MODE]) {
97 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
98 if (priv->mode > NFT_TUNNEL_MODE_MAX)
99 return -EOPNOTSUPP;
100 } else {
101 priv->mode = NFT_TUNNEL_MODE_NONE;
102 }
103
104 return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
105 NULL, NFT_DATA_VALUE, len);
106}
107
108static int nft_tunnel_get_dump(struct sk_buff *skb,
109 const struct nft_expr *expr)
110{
111 const struct nft_tunnel *priv = nft_expr_priv(expr);
112
113 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
114 goto nla_put_failure;
115 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
116 goto nla_put_failure;
117 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
118 goto nla_put_failure;
119 return 0;
120
121nla_put_failure:
122 return -1;
123}
124
125static struct nft_expr_type nft_tunnel_type;
126static const struct nft_expr_ops nft_tunnel_get_ops = {
127 .type = &nft_tunnel_type,
128 .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
129 .eval = nft_tunnel_get_eval,
130 .init = nft_tunnel_get_init,
131 .dump = nft_tunnel_get_dump,
132};
133
134static struct nft_expr_type nft_tunnel_type __read_mostly = {
135 .name = "tunnel",
136 .ops = &nft_tunnel_get_ops,
137 .policy = nft_tunnel_policy,
138 .maxattr = NFTA_TUNNEL_MAX,
139 .owner = THIS_MODULE,
140};
141
142struct nft_tunnel_opts {
143 union {
144 struct vxlan_metadata vxlan;
145 struct erspan_metadata erspan;
146 u8 data[IP_TUNNEL_OPTS_MAX];
147 } u;
148 u32 len;
149 __be16 flags;
150};
151
152struct nft_tunnel_obj {
153 struct metadata_dst *md;
154 struct nft_tunnel_opts opts;
155};
156
157static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
158 [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
159 [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
160};
161
162static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
163 const struct nlattr *attr,
164 struct ip_tunnel_info *info)
165{
166 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
167 int err;
168
169 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
170 nft_tunnel_ip_policy, NULL);
171 if (err < 0)
172 return err;
173
174 if (!tb[NFTA_TUNNEL_KEY_IP_DST])
175 return -EINVAL;
176
177 if (tb[NFTA_TUNNEL_KEY_IP_SRC])
178 info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
179 if (tb[NFTA_TUNNEL_KEY_IP_DST])
180 info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
181
182 return 0;
183}
184
185static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
186 [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
187 [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
188 [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
189};
190
191static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
192 const struct nlattr *attr,
193 struct ip_tunnel_info *info)
194{
195 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
196 int err;
197
198 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
199 nft_tunnel_ip6_policy, NULL);
200 if (err < 0)
201 return err;
202
203 if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
204 return -EINVAL;
205
206 if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
207 memcpy(&info->key.u.ipv6.src,
208 nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
209 sizeof(struct in6_addr));
210 }
211 if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
212 memcpy(&info->key.u.ipv6.dst,
213 nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
214 sizeof(struct in6_addr));
215 }
216 if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
217 info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
218
219 info->mode |= IP_TUNNEL_INFO_IPV6;
220
221 return 0;
222}
223
224static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
225 [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
226};
227
228static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
229 struct nft_tunnel_opts *opts)
230{
231 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
232 int err;
233
234 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
235 nft_tunnel_opts_vxlan_policy, NULL);
236 if (err < 0)
237 return err;
238
239 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
240 return -EINVAL;
241
242 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
243
244 opts->len = sizeof(struct vxlan_metadata);
245 opts->flags = TUNNEL_VXLAN_OPT;
246
247 return 0;
248}
249
250static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
251 [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
252 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
253 [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
254 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
255};
256
257static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
258 struct nft_tunnel_opts *opts)
259{
260 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
261 uint8_t hwid, dir;
262 int err, version;
263
264 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
265 attr, nft_tunnel_opts_erspan_policy,
266 NULL);
267 if (err < 0)
268 return err;
269
270 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
271 return -EINVAL;
272
273 version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
274 switch (version) {
275 case ERSPAN_VERSION:
276 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
277 return -EINVAL;
278
279 opts->u.erspan.u.index =
280 nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
281 break;
282 case ERSPAN_VERSION2:
283 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
284 !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
285 return -EINVAL;
286
287 hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
288 dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
289
290 set_hwid(&opts->u.erspan.u.md2, hwid);
291 opts->u.erspan.u.md2.dir = dir;
292 break;
293 default:
294 return -EOPNOTSUPP;
295 }
296 opts->u.erspan.version = version;
297
298 opts->len = sizeof(struct erspan_metadata);
299 opts->flags = TUNNEL_ERSPAN_OPT;
300
301 return 0;
302}
303
304static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
305 [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
306 [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
307 [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
308};
309
310static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
311 struct nft_tunnel_opts *opts)
312{
313 struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
314 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
315 int err, data_len;
316
317 err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
318 nft_tunnel_opts_geneve_policy, NULL);
319 if (err < 0)
320 return err;
321
322 if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
323 !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
324 !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
325 return -EINVAL;
326
327 attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
328 data_len = nla_len(attr);
329 if (data_len % 4)
330 return -EINVAL;
331
332 opts->len += sizeof(*opt) + data_len;
333 if (opts->len > IP_TUNNEL_OPTS_MAX)
334 return -EINVAL;
335
336 memcpy(opt->opt_data, nla_data(attr), data_len);
337 opt->length = data_len / 4;
338 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
339 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
340 opts->flags = TUNNEL_GENEVE_OPT;
341
342 return 0;
343}
344
345static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
346 [NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
347 .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
348 [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
349 [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
350 [NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
351};
352
353static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
354 const struct nlattr *attr,
355 struct ip_tunnel_info *info,
356 struct nft_tunnel_opts *opts)
357{
358 int err, rem, type = 0;
359 struct nlattr *nla;
360
361 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
362 nft_tunnel_opts_policy, NULL);
363 if (err < 0)
364 return err;
365
366 nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
367 switch (nla_type(nla)) {
368 case NFTA_TUNNEL_KEY_OPTS_VXLAN:
369 if (type)
370 return -EINVAL;
371 err = nft_tunnel_obj_vxlan_init(nla, opts);
372 if (err)
373 return err;
374 type = TUNNEL_VXLAN_OPT;
375 break;
376 case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
377 if (type)
378 return -EINVAL;
379 err = nft_tunnel_obj_erspan_init(nla, opts);
380 if (err)
381 return err;
382 type = TUNNEL_ERSPAN_OPT;
383 break;
384 case NFTA_TUNNEL_KEY_OPTS_GENEVE:
385 if (type && type != TUNNEL_GENEVE_OPT)
386 return -EINVAL;
387 err = nft_tunnel_obj_geneve_init(nla, opts);
388 if (err)
389 return err;
390 type = TUNNEL_GENEVE_OPT;
391 break;
392 default:
393 return -EOPNOTSUPP;
394 }
395 }
396
397 return err;
398}
399
400static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
401 [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
402 [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
403 [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
404 [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
405 [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
406 [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
407 [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
408 [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
409 [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
410};
411
412static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
413 const struct nlattr * const tb[],
414 struct nft_object *obj)
415{
416 struct nft_tunnel_obj *priv = nft_obj_data(obj);
417 struct ip_tunnel_info info;
418 struct metadata_dst *md;
419 int err;
420
421 if (!tb[NFTA_TUNNEL_KEY_ID])
422 return -EINVAL;
423
424 memset(&info, 0, sizeof(info));
425 info.mode = IP_TUNNEL_INFO_TX;
426 info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
427 info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
428
429 if (tb[NFTA_TUNNEL_KEY_IP]) {
430 err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
431 if (err < 0)
432 return err;
433 } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
434 err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
435 if (err < 0)
436 return err;
437 } else {
438 return -EINVAL;
439 }
440
441 if (tb[NFTA_TUNNEL_KEY_SPORT]) {
442 info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
443 }
444 if (tb[NFTA_TUNNEL_KEY_DPORT]) {
445 info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
446 }
447
448 if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
449 u32 tun_flags;
450
451 tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
452 if (tun_flags & ~NFT_TUNNEL_F_MASK)
453 return -EOPNOTSUPP;
454
455 if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
456 info.key.tun_flags &= ~TUNNEL_CSUM;
457 if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
458 info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
459 if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
460 info.key.tun_flags |= TUNNEL_SEQ;
461 }
462 if (tb[NFTA_TUNNEL_KEY_TOS])
463 info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
464 if (tb[NFTA_TUNNEL_KEY_TTL])
465 info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
466 else
467 info.key.ttl = U8_MAX;
468
469 if (tb[NFTA_TUNNEL_KEY_OPTS]) {
470 err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
471 &info, &priv->opts);
472 if (err < 0)
473 return err;
474 }
475
476 md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
477 if (!md)
478 return -ENOMEM;
479
480 memcpy(&md->u.tun_info, &info, sizeof(info));
481#ifdef CONFIG_DST_CACHE
482 err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
483 if (err < 0) {
484 metadata_dst_free(md);
485 return err;
486 }
487#endif
488 ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
489 priv->opts.flags);
490 priv->md = md;
491
492 return 0;
493}
494
495static inline void nft_tunnel_obj_eval(struct nft_object *obj,
496 struct nft_regs *regs,
497 const struct nft_pktinfo *pkt)
498{
499 struct nft_tunnel_obj *priv = nft_obj_data(obj);
500 struct sk_buff *skb = pkt->skb;
501
502 skb_dst_drop(skb);
503 dst_hold((struct dst_entry *) priv->md);
504 skb_dst_set(skb, (struct dst_entry *) priv->md);
505}
506
507static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
508{
509 struct nlattr *nest;
510
511 if (info->mode & IP_TUNNEL_INFO_IPV6) {
512 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
513 if (!nest)
514 return -1;
515
516 if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
517 &info->key.u.ipv6.src) < 0 ||
518 nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
519 &info->key.u.ipv6.dst) < 0 ||
520 nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
521 info->key.label)) {
522 nla_nest_cancel(skb, nest);
523 return -1;
524 }
525
526 nla_nest_end(skb, nest);
527 } else {
528 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
529 if (!nest)
530 return -1;
531
532 if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
533 info->key.u.ipv4.src) < 0 ||
534 nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
535 info->key.u.ipv4.dst) < 0) {
536 nla_nest_cancel(skb, nest);
537 return -1;
538 }
539
540 nla_nest_end(skb, nest);
541 }
542
543 return 0;
544}
545
546static int nft_tunnel_opts_dump(struct sk_buff *skb,
547 struct nft_tunnel_obj *priv)
548{
549 struct nft_tunnel_opts *opts = &priv->opts;
550 struct nlattr *nest, *inner;
551
552 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
553 if (!nest)
554 return -1;
555
556 if (opts->flags & TUNNEL_VXLAN_OPT) {
557 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
558 if (!inner)
559 goto failure;
560 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
561 htonl(opts->u.vxlan.gbp)))
562 goto inner_failure;
563 nla_nest_end(skb, inner);
564 } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
565 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
566 if (!inner)
567 goto failure;
568 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
569 htonl(opts->u.erspan.version)))
570 goto inner_failure;
571 switch (opts->u.erspan.version) {
572 case ERSPAN_VERSION:
573 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
574 opts->u.erspan.u.index))
575 goto inner_failure;
576 break;
577 case ERSPAN_VERSION2:
578 if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
579 get_hwid(&opts->u.erspan.u.md2)) ||
580 nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
581 opts->u.erspan.u.md2.dir))
582 goto inner_failure;
583 break;
584 }
585 nla_nest_end(skb, inner);
586 } else if (opts->flags & TUNNEL_GENEVE_OPT) {
587 struct geneve_opt *opt;
588 int offset = 0;
589
590 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
591 if (!inner)
592 goto failure;
593 while (opts->len > offset) {
594 opt = (struct geneve_opt *)opts->u.data + offset;
595 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
596 opt->opt_class) ||
597 nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
598 opt->type) ||
599 nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
600 opt->length * 4, opt->opt_data))
601 goto inner_failure;
602 offset += sizeof(*opt) + opt->length * 4;
603 }
604 nla_nest_end(skb, inner);
605 }
606 nla_nest_end(skb, nest);
607 return 0;
608
609inner_failure:
610 nla_nest_cancel(skb, inner);
611failure:
612 nla_nest_cancel(skb, nest);
613 return -1;
614}
615
616static int nft_tunnel_ports_dump(struct sk_buff *skb,
617 struct ip_tunnel_info *info)
618{
619 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
620 nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
621 return -1;
622
623 return 0;
624}
625
626static int nft_tunnel_flags_dump(struct sk_buff *skb,
627 struct ip_tunnel_info *info)
628{
629 u32 flags = 0;
630
631 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
632 flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
633 if (!(info->key.tun_flags & TUNNEL_CSUM))
634 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
635 if (info->key.tun_flags & TUNNEL_SEQ)
636 flags |= NFT_TUNNEL_F_SEQ_NUMBER;
637
638 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
639 return -1;
640
641 return 0;
642}
643
644static int nft_tunnel_obj_dump(struct sk_buff *skb,
645 struct nft_object *obj, bool reset)
646{
647 struct nft_tunnel_obj *priv = nft_obj_data(obj);
648 struct ip_tunnel_info *info = &priv->md->u.tun_info;
649
650 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
651 tunnel_id_to_key32(info->key.tun_id)) ||
652 nft_tunnel_ip_dump(skb, info) < 0 ||
653 nft_tunnel_ports_dump(skb, info) < 0 ||
654 nft_tunnel_flags_dump(skb, info) < 0 ||
655 nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
656 nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
657 nft_tunnel_opts_dump(skb, priv) < 0)
658 goto nla_put_failure;
659
660 return 0;
661
662nla_put_failure:
663 return -1;
664}
665
666static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
667 struct nft_object *obj)
668{
669 struct nft_tunnel_obj *priv = nft_obj_data(obj);
670
671 metadata_dst_free(priv->md);
672}
673
674static struct nft_object_type nft_tunnel_obj_type;
675static const struct nft_object_ops nft_tunnel_obj_ops = {
676 .type = &nft_tunnel_obj_type,
677 .size = sizeof(struct nft_tunnel_obj),
678 .eval = nft_tunnel_obj_eval,
679 .init = nft_tunnel_obj_init,
680 .destroy = nft_tunnel_obj_destroy,
681 .dump = nft_tunnel_obj_dump,
682};
683
684static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
685 .type = NFT_OBJECT_TUNNEL,
686 .ops = &nft_tunnel_obj_ops,
687 .maxattr = NFTA_TUNNEL_KEY_MAX,
688 .policy = nft_tunnel_key_policy,
689 .owner = THIS_MODULE,
690};
691
692static int __init nft_tunnel_module_init(void)
693{
694 int err;
695
696 err = nft_register_expr(&nft_tunnel_type);
697 if (err < 0)
698 return err;
699
700 err = nft_register_obj(&nft_tunnel_obj_type);
701 if (err < 0)
702 nft_unregister_expr(&nft_tunnel_type);
703
704 return err;
705}
706
707static void __exit nft_tunnel_module_exit(void)
708{
709 nft_unregister_obj(&nft_tunnel_obj_type);
710 nft_unregister_expr(&nft_tunnel_type);
711}
712
713module_init(nft_tunnel_module_init);
714module_exit(nft_tunnel_module_exit);
715
716MODULE_LICENSE("GPL");
717MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
718MODULE_ALIAS_NFT_EXPR("tunnel");
719MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
720MODULE_DESCRIPTION("nftables tunnel expression support");
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>
16struct nft_tunnel {
17 enum nft_tunnel_keys key:8;
18 u8 dreg;
19 enum nft_tunnel_mode mode:8;
20 u8 len;
21};
22
23static void nft_tunnel_get_eval(const struct nft_expr *expr,
24 struct nft_regs *regs,
25 const struct nft_pktinfo *pkt)
26{
27 const struct nft_tunnel *priv = nft_expr_priv(expr);
28 u32 *dest = ®s->data[priv->dreg];
29 struct ip_tunnel_info *tun_info;
30
31 tun_info = skb_tunnel_info(pkt->skb);
32
33 switch (priv->key) {
34 case NFT_TUNNEL_PATH:
35 if (!tun_info) {
36 nft_reg_store8(dest, false);
37 return;
38 }
39 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
40 (priv->mode == NFT_TUNNEL_MODE_RX &&
41 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
42 (priv->mode == NFT_TUNNEL_MODE_TX &&
43 (tun_info->mode & IP_TUNNEL_INFO_TX)))
44 nft_reg_store8(dest, true);
45 else
46 nft_reg_store8(dest, false);
47 break;
48 case NFT_TUNNEL_ID:
49 if (!tun_info) {
50 regs->verdict.code = NFT_BREAK;
51 return;
52 }
53 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
54 (priv->mode == NFT_TUNNEL_MODE_RX &&
55 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
56 (priv->mode == NFT_TUNNEL_MODE_TX &&
57 (tun_info->mode & IP_TUNNEL_INFO_TX)))
58 *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
59 else
60 regs->verdict.code = NFT_BREAK;
61 break;
62 default:
63 WARN_ON(1);
64 regs->verdict.code = NFT_BREAK;
65 }
66}
67
68static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
69 [NFTA_TUNNEL_KEY] = NLA_POLICY_MAX(NLA_BE32, 255),
70 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
71 [NFTA_TUNNEL_MODE] = NLA_POLICY_MAX(NLA_BE32, 255),
72};
73
74static int nft_tunnel_get_init(const struct nft_ctx *ctx,
75 const struct nft_expr *expr,
76 const struct nlattr * const tb[])
77{
78 struct nft_tunnel *priv = nft_expr_priv(expr);
79 u32 len;
80
81 if (!tb[NFTA_TUNNEL_KEY] ||
82 !tb[NFTA_TUNNEL_DREG])
83 return -EINVAL;
84
85 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
86 switch (priv->key) {
87 case NFT_TUNNEL_PATH:
88 len = sizeof(u8);
89 break;
90 case NFT_TUNNEL_ID:
91 len = sizeof(u32);
92 break;
93 default:
94 return -EOPNOTSUPP;
95 }
96
97 if (tb[NFTA_TUNNEL_MODE]) {
98 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
99 if (priv->mode > NFT_TUNNEL_MODE_MAX)
100 return -EOPNOTSUPP;
101 } else {
102 priv->mode = NFT_TUNNEL_MODE_NONE;
103 }
104
105 priv->len = len;
106 return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
107 NULL, NFT_DATA_VALUE, len);
108}
109
110static int nft_tunnel_get_dump(struct sk_buff *skb,
111 const struct nft_expr *expr, bool reset)
112{
113 const struct nft_tunnel *priv = nft_expr_priv(expr);
114
115 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
116 goto nla_put_failure;
117 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
118 goto nla_put_failure;
119 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
120 goto nla_put_failure;
121 return 0;
122
123nla_put_failure:
124 return -1;
125}
126
127static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
128 const struct nft_expr *expr)
129{
130 const struct nft_tunnel *priv = nft_expr_priv(expr);
131 const struct nft_tunnel *tunnel;
132
133 if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
134 nft_reg_track_update(track, expr, priv->dreg, priv->len);
135 return false;
136 }
137
138 tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
139 if (priv->key != tunnel->key ||
140 priv->dreg != tunnel->dreg ||
141 priv->mode != tunnel->mode) {
142 nft_reg_track_update(track, expr, priv->dreg, priv->len);
143 return false;
144 }
145
146 if (!track->regs[priv->dreg].bitwise)
147 return true;
148
149 return false;
150}
151
152static struct nft_expr_type nft_tunnel_type;
153static const struct nft_expr_ops nft_tunnel_get_ops = {
154 .type = &nft_tunnel_type,
155 .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
156 .eval = nft_tunnel_get_eval,
157 .init = nft_tunnel_get_init,
158 .dump = nft_tunnel_get_dump,
159 .reduce = nft_tunnel_get_reduce,
160};
161
162static struct nft_expr_type nft_tunnel_type __read_mostly = {
163 .name = "tunnel",
164 .family = NFPROTO_NETDEV,
165 .ops = &nft_tunnel_get_ops,
166 .policy = nft_tunnel_policy,
167 .maxattr = NFTA_TUNNEL_MAX,
168 .owner = THIS_MODULE,
169};
170
171struct nft_tunnel_opts {
172 union {
173 struct vxlan_metadata vxlan;
174 struct erspan_metadata erspan;
175 u8 data[IP_TUNNEL_OPTS_MAX];
176 } u;
177 u32 len;
178 __be16 flags;
179};
180
181struct nft_tunnel_obj {
182 struct metadata_dst *md;
183 struct nft_tunnel_opts opts;
184};
185
186static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
187 [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
188 [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
189};
190
191static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
192 const struct nlattr *attr,
193 struct ip_tunnel_info *info)
194{
195 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
196 int err;
197
198 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
199 nft_tunnel_ip_policy, NULL);
200 if (err < 0)
201 return err;
202
203 if (!tb[NFTA_TUNNEL_KEY_IP_DST])
204 return -EINVAL;
205
206 if (tb[NFTA_TUNNEL_KEY_IP_SRC])
207 info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
208 if (tb[NFTA_TUNNEL_KEY_IP_DST])
209 info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
210
211 return 0;
212}
213
214static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
215 [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
216 [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
217 [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
218};
219
220static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
221 const struct nlattr *attr,
222 struct ip_tunnel_info *info)
223{
224 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
225 int err;
226
227 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
228 nft_tunnel_ip6_policy, NULL);
229 if (err < 0)
230 return err;
231
232 if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
233 return -EINVAL;
234
235 if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
236 memcpy(&info->key.u.ipv6.src,
237 nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
238 sizeof(struct in6_addr));
239 }
240 if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
241 memcpy(&info->key.u.ipv6.dst,
242 nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
243 sizeof(struct in6_addr));
244 }
245 if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
246 info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
247
248 info->mode |= IP_TUNNEL_INFO_IPV6;
249
250 return 0;
251}
252
253static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
254 [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
255};
256
257static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
258 struct nft_tunnel_opts *opts)
259{
260 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
261 int err;
262
263 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
264 nft_tunnel_opts_vxlan_policy, NULL);
265 if (err < 0)
266 return err;
267
268 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
269 return -EINVAL;
270
271 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
272
273 opts->len = sizeof(struct vxlan_metadata);
274 opts->flags = TUNNEL_VXLAN_OPT;
275
276 return 0;
277}
278
279static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
280 [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
281 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
282 [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
283 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
284};
285
286static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
287 struct nft_tunnel_opts *opts)
288{
289 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
290 uint8_t hwid, dir;
291 int err, version;
292
293 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
294 attr, nft_tunnel_opts_erspan_policy,
295 NULL);
296 if (err < 0)
297 return err;
298
299 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
300 return -EINVAL;
301
302 version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
303 switch (version) {
304 case ERSPAN_VERSION:
305 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
306 return -EINVAL;
307
308 opts->u.erspan.u.index =
309 nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
310 break;
311 case ERSPAN_VERSION2:
312 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
313 !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
314 return -EINVAL;
315
316 hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
317 dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
318
319 set_hwid(&opts->u.erspan.u.md2, hwid);
320 opts->u.erspan.u.md2.dir = dir;
321 break;
322 default:
323 return -EOPNOTSUPP;
324 }
325 opts->u.erspan.version = version;
326
327 opts->len = sizeof(struct erspan_metadata);
328 opts->flags = TUNNEL_ERSPAN_OPT;
329
330 return 0;
331}
332
333static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
334 [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
335 [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
336 [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
337};
338
339static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
340 struct nft_tunnel_opts *opts)
341{
342 struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
343 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
344 int err, data_len;
345
346 err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
347 nft_tunnel_opts_geneve_policy, NULL);
348 if (err < 0)
349 return err;
350
351 if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
352 !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
353 !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
354 return -EINVAL;
355
356 attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
357 data_len = nla_len(attr);
358 if (data_len % 4)
359 return -EINVAL;
360
361 opts->len += sizeof(*opt) + data_len;
362 if (opts->len > IP_TUNNEL_OPTS_MAX)
363 return -EINVAL;
364
365 memcpy(opt->opt_data, nla_data(attr), data_len);
366 opt->length = data_len / 4;
367 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
368 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
369 opts->flags = TUNNEL_GENEVE_OPT;
370
371 return 0;
372}
373
374static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
375 [NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
376 .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
377 [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
378 [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
379 [NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
380};
381
382static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
383 const struct nlattr *attr,
384 struct ip_tunnel_info *info,
385 struct nft_tunnel_opts *opts)
386{
387 struct nlattr *nla;
388 __be16 type = 0;
389 int err, rem;
390
391 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
392 nft_tunnel_opts_policy, NULL);
393 if (err < 0)
394 return err;
395
396 nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
397 switch (nla_type(nla)) {
398 case NFTA_TUNNEL_KEY_OPTS_VXLAN:
399 if (type)
400 return -EINVAL;
401 err = nft_tunnel_obj_vxlan_init(nla, opts);
402 if (err)
403 return err;
404 type = TUNNEL_VXLAN_OPT;
405 break;
406 case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
407 if (type)
408 return -EINVAL;
409 err = nft_tunnel_obj_erspan_init(nla, opts);
410 if (err)
411 return err;
412 type = TUNNEL_ERSPAN_OPT;
413 break;
414 case NFTA_TUNNEL_KEY_OPTS_GENEVE:
415 if (type && type != TUNNEL_GENEVE_OPT)
416 return -EINVAL;
417 err = nft_tunnel_obj_geneve_init(nla, opts);
418 if (err)
419 return err;
420 type = TUNNEL_GENEVE_OPT;
421 break;
422 default:
423 return -EOPNOTSUPP;
424 }
425 }
426
427 return err;
428}
429
/* Top-level netlink policy for the tunnel object's NFTA_TUNNEL_KEY_*
 * attributes, consumed by nft_tunnel_obj_init().
 */
static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};
441
/* Create a tunnel object: translate the netlink attributes into an
 * ip_tunnel_info and preallocate the metadata dst that the eval path
 * attaches to matching packets.
 *
 * Returns 0 on success; -EINVAL on missing mandatory attributes,
 * -EOPNOTSUPP on unknown flags, -ENOMEM on allocation failure, or the
 * error from one of the sub-parsers.
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	/* The tunnel id (key/VNI) is mandatory. */
	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	/* This object only describes transmit-side tunnel metadata;
	 * checksumming is on by default (cleared via ZERO_CSUM_TX below).
	 */
	memset(&info, 0, sizeof(info));
	info.mode = IP_TUNNEL_INFO_TX;
	info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	/* Exactly one address family nest must be present. */
	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	/* Ports arrive in network byte order and are stored as-is. */
	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	/* Map the NFT_TUNNEL_F_* userspace flags onto TUNNEL_* bits. */
	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;	/* default: maximum TTL */

	/* Parse optional vxlan/erspan/geneve options into priv->opts so
	 * priv->opts.len is known before the metadata dst is sized.
	 */
	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	/* The dst cache is only compiled in with CONFIG_DST_CACHE; free
	 * the metadata dst on failure since nothing else owns it yet.
	 */
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;	/* ownership transferred; freed in obj_destroy */

	return 0;
}
524
525static inline void nft_tunnel_obj_eval(struct nft_object *obj,
526 struct nft_regs *regs,
527 const struct nft_pktinfo *pkt)
528{
529 struct nft_tunnel_obj *priv = nft_obj_data(obj);
530 struct sk_buff *skb = pkt->skb;
531
532 skb_dst_drop(skb);
533 dst_hold((struct dst_entry *) priv->md);
534 skb_dst_set(skb, (struct dst_entry *) priv->md);
535}
536
537static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
538{
539 struct nlattr *nest;
540
541 if (info->mode & IP_TUNNEL_INFO_IPV6) {
542 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
543 if (!nest)
544 return -1;
545
546 if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
547 &info->key.u.ipv6.src) < 0 ||
548 nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
549 &info->key.u.ipv6.dst) < 0 ||
550 nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
551 info->key.label)) {
552 nla_nest_cancel(skb, nest);
553 return -1;
554 }
555
556 nla_nest_end(skb, nest);
557 } else {
558 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
559 if (!nest)
560 return -1;
561
562 if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
563 info->key.u.ipv4.src) < 0 ||
564 nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
565 info->key.u.ipv4.dst) < 0) {
566 nla_nest_cancel(skb, nest);
567 return -1;
568 }
569
570 nla_nest_end(skb, nest);
571 }
572
573 return 0;
574}
575
576static int nft_tunnel_opts_dump(struct sk_buff *skb,
577 struct nft_tunnel_obj *priv)
578{
579 struct nft_tunnel_opts *opts = &priv->opts;
580 struct nlattr *nest, *inner;
581
582 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
583 if (!nest)
584 return -1;
585
586 if (opts->flags & TUNNEL_VXLAN_OPT) {
587 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
588 if (!inner)
589 goto failure;
590 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
591 htonl(opts->u.vxlan.gbp)))
592 goto inner_failure;
593 nla_nest_end(skb, inner);
594 } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
595 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
596 if (!inner)
597 goto failure;
598 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
599 htonl(opts->u.erspan.version)))
600 goto inner_failure;
601 switch (opts->u.erspan.version) {
602 case ERSPAN_VERSION:
603 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
604 opts->u.erspan.u.index))
605 goto inner_failure;
606 break;
607 case ERSPAN_VERSION2:
608 if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
609 get_hwid(&opts->u.erspan.u.md2)) ||
610 nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
611 opts->u.erspan.u.md2.dir))
612 goto inner_failure;
613 break;
614 }
615 nla_nest_end(skb, inner);
616 } else if (opts->flags & TUNNEL_GENEVE_OPT) {
617 struct geneve_opt *opt;
618 int offset = 0;
619
620 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
621 if (!inner)
622 goto failure;
623 while (opts->len > offset) {
624 opt = (struct geneve_opt *)opts->u.data + offset;
625 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
626 opt->opt_class) ||
627 nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
628 opt->type) ||
629 nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
630 opt->length * 4, opt->opt_data))
631 goto inner_failure;
632 offset += sizeof(*opt) + opt->length * 4;
633 }
634 nla_nest_end(skb, inner);
635 }
636 nla_nest_end(skb, nest);
637 return 0;
638
639inner_failure:
640 nla_nest_cancel(skb, inner);
641failure:
642 nla_nest_cancel(skb, nest);
643 return -1;
644}
645
646static int nft_tunnel_ports_dump(struct sk_buff *skb,
647 struct ip_tunnel_info *info)
648{
649 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
650 nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
651 return -1;
652
653 return 0;
654}
655
656static int nft_tunnel_flags_dump(struct sk_buff *skb,
657 struct ip_tunnel_info *info)
658{
659 u32 flags = 0;
660
661 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
662 flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
663 if (!(info->key.tun_flags & TUNNEL_CSUM))
664 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
665 if (info->key.tun_flags & TUNNEL_SEQ)
666 flags |= NFT_TUNNEL_F_SEQ_NUMBER;
667
668 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
669 return -1;
670
671 return 0;
672}
673
674static int nft_tunnel_obj_dump(struct sk_buff *skb,
675 struct nft_object *obj, bool reset)
676{
677 struct nft_tunnel_obj *priv = nft_obj_data(obj);
678 struct ip_tunnel_info *info = &priv->md->u.tun_info;
679
680 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
681 tunnel_id_to_key32(info->key.tun_id)) ||
682 nft_tunnel_ip_dump(skb, info) < 0 ||
683 nft_tunnel_ports_dump(skb, info) < 0 ||
684 nft_tunnel_flags_dump(skb, info) < 0 ||
685 nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
686 nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
687 nft_tunnel_opts_dump(skb, priv) < 0)
688 goto nla_put_failure;
689
690 return 0;
691
692nla_put_failure:
693 return -1;
694}
695
696static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
697 struct nft_object *obj)
698{
699 struct nft_tunnel_obj *priv = nft_obj_data(obj);
700
701 metadata_dst_free(priv->md);
702}
703
/* Operations vtable for the tunnel object; the type is forward-declared
 * because ops and type reference each other.
 */
static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};
713
/* Registration record for the NFT_OBJECT_TUNNEL object type; restricted
 * to the netdev family here.
 */
static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.family		= NFPROTO_NETDEV,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};
722
723static int __init nft_tunnel_module_init(void)
724{
725 int err;
726
727 err = nft_register_expr(&nft_tunnel_type);
728 if (err < 0)
729 return err;
730
731 err = nft_register_obj(&nft_tunnel_obj_type);
732 if (err < 0)
733 nft_unregister_expr(&nft_tunnel_type);
734
735 return err;
736}
737
static void __exit nft_tunnel_module_exit(void)
{
	/* Tear down in reverse registration order. */
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}
743
module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
/* Aliases let the module autoload when the expression/object is used. */
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");