Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
6
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/skbuff.h>
11#include <linux/rtnetlink.h>
12#include <net/geneve.h>
13#include <net/netlink.h>
14#include <net/pkt_sched.h>
15#include <net/dst.h>
16#include <net/pkt_cls.h>
17
18#include <linux/tc_act/tc_tunnel_key.h>
19#include <net/tc_act/tc_tunnel_key.h>
20
/* Per-netns ID used to look up this action's tc_action_net via net_generic(). */
static unsigned int tunnel_key_net_id;
/* Forward declaration; the ops table is defined at the bottom of the file. */
static struct tc_action_ops act_tunnel_key_ops;
23
/* Datapath handler: attach or release tunnel metadata on the skb.
 *
 * Runs on the RCU-BH read side; t->params is replaced by tunnel_key_init()
 * under t->tcf_lock and freed via kfree_rcu(), so a plain
 * rcu_dereference_bh() is sufficient here.
 */
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		/* Strip any tunnel metadata collected on ingress. */
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		/* Point the skb at the pre-built encap metadata dst;
		 * dst_clone() takes a reference for this skb.
		 */
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}
53
/* Policy for the container nest inside TCA_TUNNEL_KEY_ENC_OPTS. */
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
};
58
/* Policy for one geneve option: class/type/data triple.
 * Data is capped at 128 bytes, matching the geneve option length field
 * (5 bits expressing multiples of 4 bytes).
 */
static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};
66
/* Parse one TCA_TUNNEL_KEY_ENC_OPTS_GENEVE nest and, if @dst is non-NULL,
 * serialize it as a struct geneve_opt followed by its payload.
 *
 * Called twice: first with dst == NULL purely to size the option, then
 * again with a destination buffer of at least that size.
 *
 * Returns the serialized length in bytes, or a negative errno.
 */
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	/* The geneve header expresses option length in 4-byte units, so the
	 * payload must be a non-zero multiple of 4.
	 */
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		/* Sizing pass should have guaranteed enough room. */
		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		/* Payload immediately follows the fixed header. */
		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
118
/* Walk all option attributes inside TCA_TUNNEL_KEY_ENC_OPTS.
 *
 * With dst == NULL this is a pure sizing pass (see
 * tunnel_key_get_opts_len()); with a buffer it serializes each option,
 * advancing @dst/@dst_len as it goes.
 *
 * Returns total serialized length, or a negative errno.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			/* Bound total options to what ip_tunnel_info can hold. */
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	/* rem != 0 means trailing bytes that were not a whole attribute. */
	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}
162
/* Sizing-only pass over the encap options: returns the number of bytes
 * the serialized options will occupy, without writing anything.
 */
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
168
/* Serialize the user-supplied encap options into @info's trailing options
 * area (which must already have room for @opts_len bytes) and set the
 * matching tun_flags bit.
 */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}
187
/* Top-level netlink policy for TCA_TUNNEL_KEY_* attributes. */
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
	[TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
201
/* Drop the metadata dst reference held by a params block (SET actions
 * own one) and free the block after the current RCU grace period.
 */
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}
211
212static int tunnel_key_init(struct net *net, struct nlattr *nla,
213 struct nlattr *est, struct tc_action **a,
214 int ovr, int bind, bool rtnl_held,
215 struct tcf_proto *tp,
216 struct netlink_ext_ack *extack)
217{
218 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
219 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
220 struct tcf_tunnel_key_params *params_new;
221 struct metadata_dst *metadata = NULL;
222 struct tcf_chain *goto_ch = NULL;
223 struct tc_tunnel_key *parm;
224 struct tcf_tunnel_key *t;
225 bool exists = false;
226 __be16 dst_port = 0;
227 __be64 key_id = 0;
228 int opts_len = 0;
229 __be16 flags = 0;
230 u8 tos, ttl;
231 int ret = 0;
232 u32 index;
233 int err;
234
235 if (!nla) {
236 NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
237 return -EINVAL;
238 }
239
240 err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
241 tunnel_key_policy, extack);
242 if (err < 0) {
243 NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
244 return err;
245 }
246
247 if (!tb[TCA_TUNNEL_KEY_PARMS]) {
248 NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
249 return -EINVAL;
250 }
251
252 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
253 index = parm->index;
254 err = tcf_idr_check_alloc(tn, &index, a, bind);
255 if (err < 0)
256 return err;
257 exists = err;
258 if (exists && bind)
259 return 0;
260
261 switch (parm->t_action) {
262 case TCA_TUNNEL_KEY_ACT_RELEASE:
263 break;
264 case TCA_TUNNEL_KEY_ACT_SET:
265 if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
266 __be32 key32;
267
268 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
269 key_id = key32_to_tunnel_id(key32);
270 flags = TUNNEL_KEY;
271 }
272
273 flags |= TUNNEL_CSUM;
274 if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
275 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
276 flags &= ~TUNNEL_CSUM;
277
278 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
279 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
280
281 if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
282 opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
283 extack);
284 if (opts_len < 0) {
285 ret = opts_len;
286 goto err_out;
287 }
288 }
289
290 tos = 0;
291 if (tb[TCA_TUNNEL_KEY_ENC_TOS])
292 tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
293 ttl = 0;
294 if (tb[TCA_TUNNEL_KEY_ENC_TTL])
295 ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
296
297 if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
298 tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
299 __be32 saddr;
300 __be32 daddr;
301
302 saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
303 daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
304
305 metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
306 dst_port, flags,
307 key_id, opts_len);
308 } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
309 tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
310 struct in6_addr saddr;
311 struct in6_addr daddr;
312
313 saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
314 daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
315
316 metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
317 0, flags,
318 key_id, 0);
319 } else {
320 NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
321 ret = -EINVAL;
322 goto err_out;
323 }
324
325 if (!metadata) {
326 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
327 ret = -ENOMEM;
328 goto err_out;
329 }
330
331#ifdef CONFIG_DST_CACHE
332 ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
333 if (ret)
334 goto release_tun_meta;
335#endif
336
337 if (opts_len) {
338 ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
339 &metadata->u.tun_info,
340 opts_len, extack);
341 if (ret < 0)
342 goto release_tun_meta;
343 }
344
345 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
346 break;
347 default:
348 NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
349 ret = -EINVAL;
350 goto err_out;
351 }
352
353 if (!exists) {
354 ret = tcf_idr_create(tn, index, est, a,
355 &act_tunnel_key_ops, bind, true);
356 if (ret) {
357 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
358 goto release_tun_meta;
359 }
360
361 ret = ACT_P_CREATED;
362 } else if (!ovr) {
363 NL_SET_ERR_MSG(extack, "TC IDR already exists");
364 ret = -EEXIST;
365 goto release_tun_meta;
366 }
367
368 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
369 if (err < 0) {
370 ret = err;
371 exists = true;
372 goto release_tun_meta;
373 }
374 t = to_tunnel_key(*a);
375
376 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
377 if (unlikely(!params_new)) {
378 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
379 ret = -ENOMEM;
380 exists = true;
381 goto put_chain;
382 }
383 params_new->tcft_action = parm->t_action;
384 params_new->tcft_enc_metadata = metadata;
385
386 spin_lock_bh(&t->tcf_lock);
387 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
388 rcu_swap_protected(t->params, params_new,
389 lockdep_is_held(&t->tcf_lock));
390 spin_unlock_bh(&t->tcf_lock);
391 tunnel_key_release_params(params_new);
392 if (goto_ch)
393 tcf_chain_put_by_act(goto_ch);
394
395 if (ret == ACT_P_CREATED)
396 tcf_idr_insert(tn, *a);
397
398 return ret;
399
400put_chain:
401 if (goto_ch)
402 tcf_chain_put_by_act(goto_ch);
403
404release_tun_meta:
405 if (metadata)
406 dst_release(&metadata->dst);
407
408err_out:
409 if (exists)
410 tcf_idr_release(*a, bind);
411 else
412 tcf_idr_cleanup(tn, index);
413 return ret;
414}
415
/* Action teardown: release the current params block.  Called when the
 * action's last reference goes away, so the plain "1" protection
 * argument to rcu_dereference_protected() is safe — no concurrent
 * writers remain.
 */
static void tunnel_key_release(struct tc_action *a)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);
	tunnel_key_release_params(params);
}
424
/* Dump the serialized geneve options stored after the ip_tunnel_info
 * back into nested netlink attributes.
 */
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	u8 *src = (u8 *)(info + 1); /* options live directly after info */
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1)) {
			nla_nest_cancel(skb, start);
			return -EMSGSIZE;
		}

		/* opt->length counts 4-byte payload units. */
		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}
456
/* Dump the TCA_TUNNEL_KEY_ENC_OPTS nest, or nothing if no options are
 * configured.  Note the err_out label is placed inside the else branch
 * so an unknown option type also cancels the nest with -EINVAL.
 */
static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err = -EINVAL;

	if (!info->options_len)
		return 0;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else {
err_out:
		nla_nest_cancel(skb, start);
		return err;
	}

	nla_nest_end(skb, start);
	return 0;
}
483
484static int tunnel_key_dump_addresses(struct sk_buff *skb,
485 const struct ip_tunnel_info *info)
486{
487 unsigned short family = ip_tunnel_info_af(info);
488
489 if (family == AF_INET) {
490 __be32 saddr = info->key.u.ipv4.src;
491 __be32 daddr = info->key.u.ipv4.dst;
492
493 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
494 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
495 return 0;
496 }
497
498 if (family == AF_INET6) {
499 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
500 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
501
502 if (!nla_put_in6_addr(skb,
503 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
504 !nla_put_in6_addr(skb,
505 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
506 return 0;
507 }
508
509 return -EINVAL;
510}
511
/* Dump the full action configuration to user space.
 *
 * Runs under t->tcf_lock so the params block cannot be swapped out
 * while the netlink message is being assembled.
 */
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index = t->tcf_index,
		.refcnt = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		/* Key id, dst port, tos and ttl are only emitted when set;
		 * NO_CSUM is always emitted, reflecting the current flag.
		 */
		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				  key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
572
/* Iterate over all tunnel_key actions in @net on behalf of a dump. */
static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
582
/* Look up a tunnel_key action by index in this netns. */
static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tcf_idr_search(tn, a, index);
}
589
/* tc action ops vtable registered with the act API. */
static struct tc_action_ops act_tunnel_key_ops = {
	.kind = "tunnel_key",
	.id = TCA_ID_TUNNEL_KEY,
	.owner = THIS_MODULE,
	.act = tunnel_key_act,
	.dump = tunnel_key_dump,
	.init = tunnel_key_init,
	.cleanup = tunnel_key_release,
	.walk = tunnel_key_walker,
	.lookup = tunnel_key_search,
	.size = sizeof(struct tcf_tunnel_key),
};
602
/* Per-netns setup: initialize this action's tc_action_net state. */
static __net_init int tunnel_key_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);

	return tc_action_net_init(net, tn, &act_tunnel_key_ops);
}
609
/* Per-netns batched teardown of this action's state. */
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}
614
/* Registers per-netns init/exit; .id/.size make net_generic() work. */
static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};
621
/* Module entry: register the action and its pernet ops. */
static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
626
/* Module exit: unregister the action and its pernet ops. */
static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
631
module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
6
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/skbuff.h>
11#include <linux/rtnetlink.h>
12#include <net/geneve.h>
13#include <net/vxlan.h>
14#include <net/erspan.h>
15#include <net/netlink.h>
16#include <net/pkt_sched.h>
17#include <net/dst.h>
18#include <net/pkt_cls.h>
19
20#include <linux/tc_act/tc_tunnel_key.h>
21#include <net/tc_act/tc_tunnel_key.h>
22
/* Per-netns ID for net_generic() lookup; ops table defined below. */
static unsigned int tunnel_key_net_id;
static struct tc_action_ops act_tunnel_key_ops;
25
/* Datapath handler: attach or release tunnel metadata on the skb.
 * Runs on the RCU-BH read side; params is replaced via RCU from
 * tunnel_key_init().
 */
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	tcf_action_update_bstats(&t->common, skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		/* Strip any tunnel metadata collected on ingress. */
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		/* Attach a reference to the pre-built encap metadata dst. */
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}
55
/* Policy for the TCA_TUNNEL_KEY_ENC_OPTS container.  Strict validation
 * starts at the VXLAN attribute (geneve predates strict checking).
 */
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};
64
/* One geneve option: class/type/data; data capped at 128 bytes. */
static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
						 .len = 128 },
};
72
/* VXLAN option: group-based policy (GBP) value. */
static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};
77
/* ERSPAN option: version, plus v1 index or v2 dir/hwid. */
static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};
85
/* Parse one geneve option nest; with dst == NULL just size it, otherwise
 * serialize a struct geneve_opt plus payload.  Returns bytes or -errno.
 */
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	/* geneve expresses option length in non-zero 4-byte units */
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		/* sizing pass should have guaranteed room */
		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
137
/* Parse one vxlan option nest; with dst non-NULL fill a vxlan_metadata
 * with the (masked) GBP value.  Always sizes to sizeof(vxlan_metadata).
 */
static int
tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (dst) {
		struct vxlan_metadata *md = dst;

		md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
		/* keep only bits valid in the VXLAN-GBP header */
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(struct vxlan_metadata);
}
164
/* Parse one erspan option nest; v1 needs an index, v2 needs dir + hwid.
 * With dst non-NULL fill an erspan_metadata.  Always sizes to
 * sizeof(erspan_metadata).
 */
static int
tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
	int err;
	u8 ver;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
	if (ver == 1) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
	} else if (ver == 2) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	if (dst) {
		struct erspan_metadata *md = dst;

		md->version = ver;
		if (ver == 1) {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		} else {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	}

	return sizeof(struct erspan_metadata);
}
217
/* Walk all option attributes inside TCA_TUNNEL_KEY_ENC_OPTS.
 *
 * Geneve allows multiple options; vxlan and erspan allow exactly one,
 * and mixing tunnel types is rejected via the @type latch.  With
 * dst == NULL this is a pure sizing pass.  Returns total bytes or -errno.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			if (type && type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			/* bound to what ip_tunnel_info can carry */
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			type = TUNNEL_GENEVE_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
							    dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_VXLAN_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_ERSPAN_OPT;
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	/* rem != 0 means trailing bytes that were not a whole attribute */
	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}
290
/* Sizing-only pass: bytes the serialized options will occupy. */
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
296
/* Serialize the encap options into @info's trailing options area (which
 * must already have room for @opts_len bytes) and set the tunnel-type
 * flag matching the option kind.
 */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}
331
/* Top-level netlink policy for TCA_TUNNEL_KEY_* attributes. */
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
	[TCA_TUNNEL_KEY_NO_CSUM] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS] = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
};
345
/* Drop the metadata dst reference held by a SET params block and free
 * the block after the current RCU grace period.
 */
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}
355
356static int tunnel_key_init(struct net *net, struct nlattr *nla,
357 struct nlattr *est, struct tc_action **a,
358 int ovr, int bind, bool rtnl_held,
359 struct tcf_proto *tp, u32 act_flags,
360 struct netlink_ext_ack *extack)
361{
362 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
363 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
364 struct tcf_tunnel_key_params *params_new;
365 struct metadata_dst *metadata = NULL;
366 struct tcf_chain *goto_ch = NULL;
367 struct tc_tunnel_key *parm;
368 struct tcf_tunnel_key *t;
369 bool exists = false;
370 __be16 dst_port = 0;
371 __be64 key_id = 0;
372 int opts_len = 0;
373 __be16 flags = 0;
374 u8 tos, ttl;
375 int ret = 0;
376 u32 index;
377 int err;
378
379 if (!nla) {
380 NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
381 return -EINVAL;
382 }
383
384 err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
385 tunnel_key_policy, extack);
386 if (err < 0) {
387 NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
388 return err;
389 }
390
391 if (!tb[TCA_TUNNEL_KEY_PARMS]) {
392 NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
393 return -EINVAL;
394 }
395
396 parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
397 index = parm->index;
398 err = tcf_idr_check_alloc(tn, &index, a, bind);
399 if (err < 0)
400 return err;
401 exists = err;
402 if (exists && bind)
403 return 0;
404
405 switch (parm->t_action) {
406 case TCA_TUNNEL_KEY_ACT_RELEASE:
407 break;
408 case TCA_TUNNEL_KEY_ACT_SET:
409 if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
410 __be32 key32;
411
412 key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
413 key_id = key32_to_tunnel_id(key32);
414 flags = TUNNEL_KEY;
415 }
416
417 flags |= TUNNEL_CSUM;
418 if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
419 nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
420 flags &= ~TUNNEL_CSUM;
421
422 if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
423 dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
424
425 if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
426 opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
427 extack);
428 if (opts_len < 0) {
429 ret = opts_len;
430 goto err_out;
431 }
432 }
433
434 tos = 0;
435 if (tb[TCA_TUNNEL_KEY_ENC_TOS])
436 tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
437 ttl = 0;
438 if (tb[TCA_TUNNEL_KEY_ENC_TTL])
439 ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
440
441 if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
442 tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
443 __be32 saddr;
444 __be32 daddr;
445
446 saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
447 daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
448
449 metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
450 dst_port, flags,
451 key_id, opts_len);
452 } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
453 tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
454 struct in6_addr saddr;
455 struct in6_addr daddr;
456
457 saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
458 daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
459
460 metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
461 0, flags,
462 key_id, 0);
463 } else {
464 NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
465 ret = -EINVAL;
466 goto err_out;
467 }
468
469 if (!metadata) {
470 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
471 ret = -ENOMEM;
472 goto err_out;
473 }
474
475#ifdef CONFIG_DST_CACHE
476 ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
477 if (ret)
478 goto release_tun_meta;
479#endif
480
481 if (opts_len) {
482 ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
483 &metadata->u.tun_info,
484 opts_len, extack);
485 if (ret < 0)
486 goto release_tun_meta;
487 }
488
489 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
490 break;
491 default:
492 NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
493 ret = -EINVAL;
494 goto err_out;
495 }
496
497 if (!exists) {
498 ret = tcf_idr_create_from_flags(tn, index, est, a,
499 &act_tunnel_key_ops, bind,
500 act_flags);
501 if (ret) {
502 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
503 goto release_tun_meta;
504 }
505
506 ret = ACT_P_CREATED;
507 } else if (!ovr) {
508 NL_SET_ERR_MSG(extack, "TC IDR already exists");
509 ret = -EEXIST;
510 goto release_tun_meta;
511 }
512
513 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
514 if (err < 0) {
515 ret = err;
516 exists = true;
517 goto release_tun_meta;
518 }
519 t = to_tunnel_key(*a);
520
521 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
522 if (unlikely(!params_new)) {
523 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
524 ret = -ENOMEM;
525 exists = true;
526 goto put_chain;
527 }
528 params_new->tcft_action = parm->t_action;
529 params_new->tcft_enc_metadata = metadata;
530
531 spin_lock_bh(&t->tcf_lock);
532 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
533 params_new = rcu_replace_pointer(t->params, params_new,
534 lockdep_is_held(&t->tcf_lock));
535 spin_unlock_bh(&t->tcf_lock);
536 tunnel_key_release_params(params_new);
537 if (goto_ch)
538 tcf_chain_put_by_act(goto_ch);
539
540 return ret;
541
542put_chain:
543 if (goto_ch)
544 tcf_chain_put_by_act(goto_ch);
545
546release_tun_meta:
547 if (metadata)
548 dst_release(&metadata->dst);
549
550err_out:
551 if (exists)
552 tcf_idr_release(*a, bind);
553 else
554 tcf_idr_cleanup(tn, index);
555 return ret;
556}
557
558static void tunnel_key_release(struct tc_action *a)
559{
560 struct tcf_tunnel_key *t = to_tunnel_key(a);
561 struct tcf_tunnel_key_params *params;
562
563 params = rcu_dereference_protected(t->params, 1);
564 tunnel_key_release_params(params);
565}
566
567static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
568 const struct ip_tunnel_info *info)
569{
570 int len = info->options_len;
571 u8 *src = (u8 *)(info + 1);
572 struct nlattr *start;
573
574 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
575 if (!start)
576 return -EMSGSIZE;
577
578 while (len > 0) {
579 struct geneve_opt *opt = (struct geneve_opt *)src;
580
581 if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
582 opt->opt_class) ||
583 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
584 opt->type) ||
585 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
586 opt->length * 4, opt + 1)) {
587 nla_nest_cancel(skb, start);
588 return -EMSGSIZE;
589 }
590
591 len -= sizeof(struct geneve_opt) + opt->length * 4;
592 src += sizeof(struct geneve_opt) + opt->length * 4;
593 }
594
595 nla_nest_end(skb, start);
596 return 0;
597}
598
599static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
600 const struct ip_tunnel_info *info)
601{
602 struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
603 struct nlattr *start;
604
605 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
606 if (!start)
607 return -EMSGSIZE;
608
609 if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
610 nla_nest_cancel(skb, start);
611 return -EMSGSIZE;
612 }
613
614 nla_nest_end(skb, start);
615 return 0;
616}
617
618static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
619 const struct ip_tunnel_info *info)
620{
621 struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
622 struct nlattr *start;
623
624 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
625 if (!start)
626 return -EMSGSIZE;
627
628 if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
629 goto err;
630
631 if (md->version == 1 &&
632 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
633 goto err;
634
635 if (md->version == 2 &&
636 (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
637 md->u.md2.dir) ||
638 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
639 get_hwid(&md->u.md2))))
640 goto err;
641
642 nla_nest_end(skb, start);
643 return 0;
644err:
645 nla_nest_cancel(skb, start);
646 return -EMSGSIZE;
647}
648
649static int tunnel_key_opts_dump(struct sk_buff *skb,
650 const struct ip_tunnel_info *info)
651{
652 struct nlattr *start;
653 int err = -EINVAL;
654
655 if (!info->options_len)
656 return 0;
657
658 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
659 if (!start)
660 return -EMSGSIZE;
661
662 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
663 err = tunnel_key_geneve_opts_dump(skb, info);
664 if (err)
665 goto err_out;
666 } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
667 err = tunnel_key_vxlan_opts_dump(skb, info);
668 if (err)
669 goto err_out;
670 } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
671 err = tunnel_key_erspan_opts_dump(skb, info);
672 if (err)
673 goto err_out;
674 } else {
675err_out:
676 nla_nest_cancel(skb, start);
677 return err;
678 }
679
680 nla_nest_end(skb, start);
681 return 0;
682}
683
684static int tunnel_key_dump_addresses(struct sk_buff *skb,
685 const struct ip_tunnel_info *info)
686{
687 unsigned short family = ip_tunnel_info_af(info);
688
689 if (family == AF_INET) {
690 __be32 saddr = info->key.u.ipv4.src;
691 __be32 daddr = info->key.u.ipv4.dst;
692
693 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
694 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
695 return 0;
696 }
697
698 if (family == AF_INET6) {
699 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
700 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
701
702 if (!nla_put_in6_addr(skb,
703 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
704 !nla_put_in6_addr(skb,
705 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
706 return 0;
707 }
708
709 return -EINVAL;
710}
711
/*
 * tunnel_key_dump - serialize this action's configuration into @skb
 * @bind, @ref: caller-held counts, subtracted from the dumped counters
 *
 * Returns the new skb length on success, or -1 with the message trimmed
 * back to its pre-call tail on failure.
 */
static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);	/* for trim on error */
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	struct tc_tunnel_key opt = {
		.index = t->tcf_index,
		.refcnt = refcount_read(&t->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;

	/* tcf_lock keeps t->params stable against a concurrent replace. */
	spin_lock_bh(&t->tcf_lock);
	params = rcu_dereference_protected(t->params,
					   lockdep_is_held(&t->tcf_lock));
	opt.action = t->tcf_action;
	opt.t_action = params->tcft_action;

	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	/* Only the SET variant carries encap metadata worth dumping. */
	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
		struct ip_tunnel_info *info =
			&params->tcft_enc_metadata->u.tun_info;
		struct ip_tunnel_key *key = &info->key;
		__be32 key_id = tunnel_id_to_key32(key->tun_id);

		/* Key id and dst port are conditional attributes; the
		 * csum flag is dumped as its NO_CSUM inverse.
		 */
		if (((key->tun_flags & TUNNEL_KEY) &&
		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
		    tunnel_key_dump_addresses(skb,
					      &params->tcft_enc_metadata->u.tun_info) ||
		    (key->tp_dst &&
		     nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
				  key->tp_dst)) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
			       !(key->tun_flags & TUNNEL_CSUM)) ||
		    tunnel_key_opts_dump(skb, info))
			goto nla_put_failure;

		/* tos/ttl of 0 mean "unset" and are omitted */
		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
			goto nla_put_failure;

		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
			goto nla_put_failure;
	}

	tcf_tm_dump(&tm, &t->tcf_tm);
	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
			  &tm, TCA_TUNNEL_KEY_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&t->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&t->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
772
773static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
774 struct netlink_callback *cb, int type,
775 const struct tc_action_ops *ops,
776 struct netlink_ext_ack *extack)
777{
778 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
779
780 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
781}
782
783static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
784{
785 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
786
787 return tcf_idr_search(tn, a, index);
788}
789
/* Registration table wiring the tunnel_key callbacks into the tc
 * action core.  .init is defined earlier in this file.
 */
static struct tc_action_ops act_tunnel_key_ops = {
	.kind = "tunnel_key",
	.id = TCA_ID_TUNNEL_KEY,
	.owner = THIS_MODULE,
	.act = tunnel_key_act,
	.dump = tunnel_key_dump,
	.init = tunnel_key_init,
	.cleanup = tunnel_key_release,
	.walk = tunnel_key_walker,
	.lookup = tunnel_key_search,
	.size = sizeof(struct tcf_tunnel_key),
};
802
803static __net_init int tunnel_key_init_net(struct net *net)
804{
805 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
806
807 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
808}
809
/* Per-netns teardown: release all tunnel_key actions in the dying netns. */
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, tunnel_key_net_id);
}
814
/* Per-network-namespace lifecycle hooks; .id/.size make the core
 * allocate a struct tc_action_net per netns for us.
 */
static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id = &tunnel_key_net_id,
	.size = sizeof(struct tc_action_net),
};
821
/* Module load: register the action ops and per-netns hooks. */
static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
826
/* Module unload: unregister the action ops and per-netns hooks. */
static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
831
/* Standard module entry/exit wiring and metadata. */
module_init(tunnel_key_init_module);
module_exit(tunnel_key_cleanup_module);

MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
MODULE_DESCRIPTION("ip tunnel manipulation actions");
MODULE_LICENSE("GPL v2");