/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

struct nft_tunnel {
	enum nft_tunnel_keys key:8;
	u8 dreg;
	enum nft_tunnel_mode mode:8;
};

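/*
 * nft_tunnel_get_eval() - copy tunnel metadata into a destination register.
 *
 * Looks up the skb's tunnel dst metadata and, depending on the configured
 * key, stores either a boolean "tunnel path present" flag or the tunnel ID.
 * The RX/TX mode restricts the match to decapsulated or to-be-encapsulated
 * traffic; on a mismatch (or missing metadata for the ID key) rule
 * evaluation breaks.
 */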
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		if (!tun_info) {
			nft_reg_store8(dest, false);
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			nft_reg_store8(dest, true);
		else
			nft_reg_store8(dest, false);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		if (priv->mode == NFT_TUNNEL_MODE_NONE ||
		    (priv->mode == NFT_TUNNEL_MODE_RX &&
		     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
		    (priv->mode == NFT_TUNNEL_MODE_TX &&
		     (tun_info->mode & IP_TUNNEL_INFO_TX)))
			*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		else
			regs->verdict.code = NFT_BREAK;
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
	[NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
};

static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (tb[NFTA_TUNNEL_MODE]) {
		priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
		if (priv->mode > NFT_TUNNEL_MODE_MAX)
			return -EOPNOTSUPP;
	} else {
		priv->mode = NFT_TUNNEL_MODE_NONE;
	}

	return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
					NULL, NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type = &nft_tunnel_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval = nft_tunnel_get_eval,
	.init = nft_tunnel_get_init,
	.dump = nft_tunnel_get_dump,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name = "tunnel",
	.ops = &nft_tunnel_get_ops,
	.policy = nft_tunnel_policy,
	.maxattr = NFTA_TUNNEL_MAX,
	.owner = THIS_MODULE,
};

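/*
 * Tunnel option data carried by a tunnel object: VXLAN GBP metadata, ERSPAN
 * metadata, or a packed array of geneve options. @len counts the bytes used
 * in the union, @flags records the TUNNEL_*_OPT type.
 */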
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata vxlan;
		struct erspan_metadata erspan;
		u8 data[IP_TUNNEL_OPTS_MAX];
	} u;
	u32 len;
	__be16 flags;
};

struct nft_tunnel_obj {
	struct metadata_dst *md;
	struct nft_tunnel_opts opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
};

static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
					  nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
};

static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
					  nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
};

static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
					  nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len = sizeof(struct vxlan_metadata);
	opts->flags = TUNNEL_VXLAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
};

static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
					  attr, nft_tunnel_opts_erspan_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len = sizeof(struct erspan_metadata);
	opts->flags = TUNNEL_ERSPAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
	[NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
	[NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
};

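/*
 * Geneve options may be given more than once; each one is appended to the
 * packed option area in opts->u.data, with opts->len tracking the total
 * number of bytes used.
 */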
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
	struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
	int err, data_len;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
			       nft_tunnel_opts_geneve_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
	    !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
		return -EINVAL;

	attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	opts->len += sizeof(*opt) + data_len;
	if (opts->len > IP_TUNNEL_OPTS_MAX)
		return -EINVAL;

	memcpy(opt->opt_data, nla_data(attr), data_len);
	opt->length = data_len / 4;
	opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
	opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
	opts->flags = TUNNEL_GENEVE_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
		.strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
	[NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
};

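/*
 * Parse the NFTA_TUNNEL_KEY_OPTS nest. Exactly one vendor option type may be
 * used per tunnel object; only geneve options may appear multiple times.
 */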
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	int err, rem, type = 0;
	struct nlattr *nla;

	err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
					     nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(nla)) {
		case NFTA_TUNNEL_KEY_OPTS_VXLAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_vxlan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_VXLAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
			if (type)
				return -EINVAL;
			err = nft_tunnel_obj_erspan_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_ERSPAN_OPT;
			break;
		case NFTA_TUNNEL_KEY_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT)
				return -EINVAL;
			err = nft_tunnel_obj_geneve_init(nla, opts);
			if (err)
				return err;
			type = TUNNEL_GENEVE_OPT;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
};

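/*
 * nft_tunnel_obj_init() - build the tunnel object from netlink attributes.
 *
 * A tunnel ID and an IPv4 or IPv6 destination are mandatory. The parsed
 * parameters (addresses, ports, flags, TOS, TTL and vendor options) are
 * copied into a preallocated metadata_dst that the eval path attaches to
 * matching packets.
 */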
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode = IP_TUNNEL_INFO_TX;
	info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

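/*
 * nft_tunnel_obj_eval() - attach the prebuilt tunnel metadata to the packet.
 * The packet's current dst is dropped and replaced by the object's
 * metadata_dst, so a later encapsulating device picks up the tunnel key.
 */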
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_GENEVE_OPT) {
		struct geneve_opt *opt;
		int offset = 0;

		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
		if (!inner)
			goto failure;
		while (opts->len > offset) {
			/* offset counts bytes into the packed option area */
			opt = (struct geneve_opt *)(opts->u.data + offset);
			if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
					 opt->opt_class) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
				       opt->type) ||
			    nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
				    opt->length * 4, opt->opt_data))
				goto inner_failure;
			offset += sizeof(*opt) + opt->length * 4;
		}
		nla_nest_end(skb, inner);
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type = &nft_tunnel_obj_type,
	.size = sizeof(struct nft_tunnel_obj),
	.eval = nft_tunnel_obj_eval,
	.init = nft_tunnel_obj_init,
	.destroy = nft_tunnel_obj_destroy,
	.dump = nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type = NFT_OBJECT_TUNNEL,
	.ops = &nft_tunnel_obj_ops,
	.maxattr = NFTA_TUNNEL_KEY_MAX,
	.policy = nft_tunnel_key_policy,
	.owner = THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");