1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
5 */
6
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/kernel.h>
10#include <linux/skbuff.h>
11#include <linux/rtnetlink.h>
12#include <net/geneve.h>
13#include <net/vxlan.h>
14#include <net/erspan.h>
15#include <net/netlink.h>
16#include <net/pkt_sched.h>
17#include <net/dst.h>
18#include <net/pkt_cls.h>
19#include <net/tc_wrapper.h>
20
21#include <linux/tc_act/tc_tunnel_key.h>
22#include <net/tc_act/tc_tunnel_key.h>
23
24static struct tc_action_ops act_tunnel_key_ops;
25
26TC_INDIRECT_SCOPE int tunnel_key_act(struct sk_buff *skb,
27 const struct tc_action *a,
28 struct tcf_result *res)
29{
30 struct tcf_tunnel_key *t = to_tunnel_key(a);
31 struct tcf_tunnel_key_params *params;
32 int action;
33
34 params = rcu_dereference_bh(t->params);
35
36 tcf_lastuse_update(&t->tcf_tm);
37 tcf_action_update_bstats(&t->common, skb);
38 action = READ_ONCE(t->tcf_action);
39
40 switch (params->tcft_action) {
41 case TCA_TUNNEL_KEY_ACT_RELEASE:
42 skb_dst_drop(skb);
43 break;
44 case TCA_TUNNEL_KEY_ACT_SET:
45 skb_dst_drop(skb);
46 skb_dst_set(skb, dst_clone(¶ms->tcft_enc_metadata->dst));
47 break;
48 default:
49 WARN_ONCE(1, "Bad tunnel_key action %d.\n",
50 params->tcft_action);
51 break;
52 }
53
54 return action;
55}
56
/* Policy for the TCA_TUNNEL_KEY_ENC_OPTS container.  Strict validation
 * starts at the VXLAN type; GENEVE predates strict checking and stays
 * under the deprecated (lenient) parser.
 */
static const struct nla_policy
enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = {
		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
};
65
/* Per-option policy for one GENEVE TLV: class/type header plus up to
 * 128 bytes of opaque option data.
 */
static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	   = { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	   = { .type = NLA_BINARY,
						       .len = 128 },
};
73
/* Policy for VXLAN options: a single 32-bit group based policy (GBP) word. */
static const struct nla_policy
vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]	   = { .type = NLA_U32 },
};
78
/* Policy for ERSPAN options; which fields are mandatory depends on the
 * version attribute (v1: index, v2: dir + hwid) and is enforced in
 * tunnel_key_copy_erspan_opt().
 */
static const struct nla_policy
erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]	   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]	   = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]	   = { .type = NLA_U8 },
};
86
/* Parse one GENEVE option TLV from netlink and, when @dst is non-NULL,
 * serialize it there as a wire-format struct geneve_opt followed by its
 * data.  Called twice: first with dst == NULL to size the buffer, then
 * with the allocated destination.
 *
 * Returns the encoded length (header + data) on success, negative errno
 * on malformed input.
 */
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested_deprecated(tb,
					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* All three sub-attributes are mandatory for a complete TLV. */
	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	/* GENEVE encodes the option length in 4-byte units, minimum one. */
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		/* Sizing pass ran first, so the buffer must fit. */
		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		/* Option payload immediately follows the fixed header. */
		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
138
139static int
140tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
141 struct netlink_ext_ack *extack)
142{
143 struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
144 int err;
145
146 err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
147 vxlan_opt_policy, extack);
148 if (err < 0)
149 return err;
150
151 if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
152 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
153 return -EINVAL;
154 }
155
156 if (dst) {
157 struct vxlan_metadata *md = dst;
158
159 md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
160 md->gbp &= VXLAN_GBP_MASK;
161 }
162
163 return sizeof(struct vxlan_metadata);
164}
165
/* Parse the ERSPAN option attribute and, when @dst is non-NULL, fill the
 * struct erspan_metadata there.  Version 1 requires an index; version 2
 * requires dir and hwid.  Returns the metadata size on success, negative
 * errno on bad input.
 */
static int
tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
	int err;
	u8 ver;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	/* Enforce the per-version mandatory sub-attributes before copying. */
	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
	if (ver == 1) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
	} else if (ver == 2) {
		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	if (dst) {
		struct erspan_metadata *md = dst;

		md->version = ver;
		if (ver == 1) {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		} else {
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	}

	return sizeof(struct erspan_metadata);
}
218
/* Walk the TCA_TUNNEL_KEY_ENC_OPTS container and copy all options into
 * @dst (or just compute their total size when @dst is NULL).  Options of
 * different tunnel types must not be mixed; GENEVE alone may repeat.
 * Returns the accumulated options length or a negative errno.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
				int dst_len, struct netlink_ext_ack *extack)
{
	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
	const struct nlattr *attr, *head = nla_data(nla);

	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
				      enc_opts_policy, extack);
	if (err)
		return err;

	nla_for_each_attr(attr, head, len, rem) {
		switch (nla_type(attr)) {
		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
			/* Multiple GENEVE TLVs are allowed, but not after
			 * another tunnel type has been seen.
			 */
			if (type && type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			if (opts_len > IP_TUNNEL_OPTS_MAX) {
				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
				return -EINVAL;
			}
			/* Advance the destination for the next GENEVE TLV. */
			if (dst) {
				dst_len -= opt_len;
				dst += opt_len;
			}
			type = TUNNEL_GENEVE_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
			/* VXLAN metadata is a single fixed-size record. */
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
							    dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_VXLAN_OPT;
			break;
		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
			/* ERSPAN metadata is a single fixed-size record. */
			if (type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
							     dst_len, extack);
			if (opt_len < 0)
				return opt_len;
			opts_len += opt_len;
			type = TUNNEL_ERSPAN_OPT;
			break;
		}
	}

	if (!opts_len) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	/* rem > 0 means bytes remained that did not form a full attribute. */
	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return opts_len;
}
291
/* Sizing pass: validate the options and return their total encoded length
 * without writing anywhere (dst == NULL).
 */
static int tunnel_key_get_opts_len(struct nlattr *nla,
				   struct netlink_ext_ack *extack)
{
	return tunnel_key_copy_opts(nla, NULL, 0, extack);
}
297
/* Copy already-sized tunnel options into @info and set the matching
 * tun_flags bit for the option type.  @opts_len must come from a prior
 * tunnel_key_get_opts_len() call.  Returns >= 0 on success, negative
 * errno otherwise (-EAFNOSUPPORT when INET support is compiled out).
 */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
			       int opts_len, struct netlink_ext_ack *extack)
{
	info->options_len = opts_len;
	/* Dispatch on the type of the first nested attribute; mixing of
	 * types was already rejected by tunnel_key_copy_opts().
	 */
	switch (nla_type(nla_data(nla))) {
	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
#if IS_ENABLED(CONFIG_INET)
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
					    opts_len, extack);
#else
		return -EAFNOSUPPORT;
#endif
	default:
		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
		return -EINVAL;
	}
}
332
/* Top-level netlink policy for the tunnel_key action attributes. */
static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
	[TCA_TUNNEL_KEY_PARMS]	    = { .len = sizeof(struct tc_tunnel_key) },
	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
	[TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
	[TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
	[TCA_TUNNEL_KEY_ENC_TOS]      = { .type = NLA_U8 },
	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
};
346
347static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
348{
349 if (!p)
350 return;
351 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
352 dst_release(&p->tcft_enc_metadata->dst);
353
354 kfree_rcu(p, rcu);
355}
356
/* Control-path init/replace handler for the tunnel_key action.
 *
 * Parses the netlink configuration, builds the encap metadata dst for
 * SET mode (IPv4 or IPv6, optional key id, dst port, tos/ttl and tunnel
 * options), creates or replaces the action instance, and swaps in the
 * new parameter block under tcf_lock with RCU for readers.
 *
 * Returns ACT_P_CREATED for a new action, 0 when binding to or
 * replacing an existing one, negative errno on failure.  Error paths
 * carefully unwind: metadata dst, goto chain, and the IDR slot.
 */
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 act_flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tcf_chain *goto_ch = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id = 0;
	int opts_len = 0;
	__be16 flags = 0;
	u8 tos, ttl;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
					  tunnel_key_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	index = parm->index;
	/* Look up or reserve the action index; err > 0 means it exists. */
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		/* Decap mode needs no encap metadata. */
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			__be32 key32;

			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
			key_id = key32_to_tunnel_id(key32);
			flags = TUNNEL_KEY;
		}

		/* Checksumming is on by default; NO_CSUM clears it. */
		flags |= TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			/* Sizing pass only; actual copy happens after the
			 * metadata dst is allocated.
			 */
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		/* Exactly one address family must be fully specified. */
		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, opts_len);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

#ifdef CONFIG_DST_CACHE
		ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
		if (ret)
			goto release_tun_meta;
#endif

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_tunnel_key_ops, bind,
						act_flags);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0) {
		ret = err;
		/* The action now exists (created above); make the error
		 * path release it instead of cleaning up the IDR slot.
		 */
		exists = true;
		goto release_tun_meta;
	}
	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto put_chain;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	/* Publish the new parameters; params_new now holds the OLD block
	 * which is released after readers are done.
	 */
	spin_lock_bh(&t->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(t->params, params_new,
					 lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	tunnel_key_release_params(params_new);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_tun_meta:
	if (metadata)
		dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, index);
	return ret;
}
558
559static void tunnel_key_release(struct tc_action *a)
560{
561 struct tcf_tunnel_key *t = to_tunnel_key(a);
562 struct tcf_tunnel_key_params *params;
563
564 params = rcu_dereference_protected(t->params, 1);
565 tunnel_key_release_params(params);
566}
567
/* Dump the GENEVE options stored inline after @info as a nested
 * TCA_TUNNEL_KEY_ENC_OPTS_GENEVE attribute.  Walks the wire-format
 * geneve_opt records back-to-back for options_len bytes.
 * Returns 0 or -EMSGSIZE.
 */
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	int len = info->options_len;
	/* Options are stored immediately after the ip_tunnel_info. */
	u8 *src = (u8 *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
	if (!start)
		return -EMSGSIZE;

	while (len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class) ||
		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type) ||
		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt + 1)) {
			nla_nest_cancel(skb, start);
			return -EMSGSIZE;
		}

		/* opt->length counts 4-byte units of payload. */
		len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	nla_nest_end(skb, start);
	return 0;
}
599
600static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
601 const struct ip_tunnel_info *info)
602{
603 struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
604 struct nlattr *start;
605
606 start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
607 if (!start)
608 return -EMSGSIZE;
609
610 if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
611 nla_nest_cancel(skb, start);
612 return -EMSGSIZE;
613 }
614
615 nla_nest_end(skb, start);
616 return 0;
617}
618
/* Dump the ERSPAN option stored inline after @info as a nested
 * TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN attribute; the dumped fields depend on
 * md->version (v1: index, v2: dir + hwid).  Returns 0 or -EMSGSIZE.
 */
static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
				       const struct ip_tunnel_info *info)
{
	/* Metadata is stored immediately after the ip_tunnel_info. */
	struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
	if (!start)
		return -EMSGSIZE;

	if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto err;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto err;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto err;

	nla_nest_end(skb, start);
	return 0;
err:
	nla_nest_cancel(skb, start);
	return -EMSGSIZE;
}
649
/* Dump tunnel options (if any) as a TCA_TUNNEL_KEY_ENC_OPTS nest,
 * dispatching on the tun_flags option-type bit.  Returns 0 on success
 * (including no options), negative errno on failure.
 *
 * Note: the err_out label deliberately sits inside the trailing else so
 * an unknown flag combination falls through to cancel with -EINVAL.
 */
static int tunnel_key_opts_dump(struct sk_buff *skb,
				const struct ip_tunnel_info *info)
{
	struct nlattr *start;
	int err = -EINVAL;

	if (!info->options_len)
		return 0;

	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
	if (!start)
		return -EMSGSIZE;

	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		err = tunnel_key_geneve_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
		err = tunnel_key_vxlan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
		err = tunnel_key_erspan_opts_dump(skb, info);
		if (err)
			goto err_out;
	} else {
err_out:
		nla_nest_cancel(skb, start);
		return err;
	}

	nla_nest_end(skb, start);
	return 0;
}
684
685static int tunnel_key_dump_addresses(struct sk_buff *skb,
686 const struct ip_tunnel_info *info)
687{
688 unsigned short family = ip_tunnel_info_af(info);
689
690 if (family == AF_INET) {
691 __be32 saddr = info->key.u.ipv4.src;
692 __be32 daddr = info->key.u.ipv4.dst;
693
694 if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
695 !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
696 return 0;
697 }
698
699 if (family == AF_INET6) {
700 const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
701 const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
702
703 if (!nla_put_in6_addr(skb,
704 TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
705 !nla_put_in6_addr(skb,
706 TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
707 return 0;
708 }
709
710 return -EINVAL;
711}
712
713static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
714 int bind, int ref)
715{
716 unsigned char *b = skb_tail_pointer(skb);
717 struct tcf_tunnel_key *t = to_tunnel_key(a);
718 struct tcf_tunnel_key_params *params;
719 struct tc_tunnel_key opt = {
720 .index = t->tcf_index,
721 .refcnt = refcount_read(&t->tcf_refcnt) - ref,
722 .bindcnt = atomic_read(&t->tcf_bindcnt) - bind,
723 };
724 struct tcf_t tm;
725
726 spin_lock_bh(&t->tcf_lock);
727 params = rcu_dereference_protected(t->params,
728 lockdep_is_held(&t->tcf_lock));
729 opt.action = t->tcf_action;
730 opt.t_action = params->tcft_action;
731
732 if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
733 goto nla_put_failure;
734
735 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
736 struct ip_tunnel_info *info =
737 ¶ms->tcft_enc_metadata->u.tun_info;
738 struct ip_tunnel_key *key = &info->key;
739 __be32 key_id = tunnel_id_to_key32(key->tun_id);
740
741 if (((key->tun_flags & TUNNEL_KEY) &&
742 nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
743 tunnel_key_dump_addresses(skb,
744 ¶ms->tcft_enc_metadata->u.tun_info) ||
745 (key->tp_dst &&
746 nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
747 key->tp_dst)) ||
748 nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
749 !(key->tun_flags & TUNNEL_CSUM)) ||
750 tunnel_key_opts_dump(skb, info))
751 goto nla_put_failure;
752
753 if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
754 goto nla_put_failure;
755
756 if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
757 goto nla_put_failure;
758 }
759
760 tcf_tm_dump(&tm, &t->tcf_tm);
761 if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
762 &tm, TCA_TUNNEL_KEY_PAD))
763 goto nla_put_failure;
764 spin_unlock_bh(&t->tcf_lock);
765
766 return skb->len;
767
768nla_put_failure:
769 spin_unlock_bh(&t->tcf_lock);
770 nlmsg_trim(skb, b);
771 return -1;
772}
773
/* Destructor for the flow_action_entry tunnel copy: frees the
 * ip_tunnel_info allocated in tcf_tunnel_encap_get_tunnel().
 */
static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	kfree(priv);
}
780
/* Populate an offload flow_action entry with a private copy of this
 * action's tunnel info and register its destructor.  Returns 0 or
 * -ENOMEM if the copy fails.
 */
static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}
791
/* Hardware-offload setup hook.  On bind, fill a flow_action_entry
 * (ENCAP with a tunnel copy, or DECAP) and advance the entry index;
 * otherwise only report the action id in the flow_offload_action.
 * Returns 0 on success, -EOPNOTSUPP for unsupported modes.
 */
static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
					    void *entry_data,
					    u32 *index_inc,
					    bool bind,
					    struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				return err;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_tunnel_set(act))
			fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
		else if (is_tcf_tunnel_release(act))
			fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}
828
/* TC action ops table registering the tunnel_key callbacks. */
static struct tc_action_ops act_tunnel_key_ops = {
	.kind		=	"tunnel_key",
	.id		=	TCA_ID_TUNNEL_KEY,
	.owner		=	THIS_MODULE,
	.act		=	tunnel_key_act,
	.dump		=	tunnel_key_dump,
	.init		=	tunnel_key_init,
	.cleanup	=	tunnel_key_release,
	.offload_act_setup =	tcf_tunnel_key_offload_act_setup,
	.size		=	sizeof(struct tcf_tunnel_key),
};
840
841static __net_init int tunnel_key_init_net(struct net *net)
842{
843 struct tc_action_net *tn = net_generic(net, act_tunnel_key_ops.net_id);
844
845 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
846}
847
/* Per-netns batched exit: tear down the action tables for all netns in
 * @net_list at once.
 */
static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_tunnel_key_ops.net_id);
}
852
/* Pernet registration for the per-namespace action table. */
static struct pernet_operations tunnel_key_net_ops = {
	.init = tunnel_key_init_net,
	.exit_batch = tunnel_key_exit_net,
	.id   = &act_tunnel_key_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
859
/* Module entry: register the action ops and pernet hooks. */
static int __init tunnel_key_init_module(void)
{
	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
864
/* Module exit: unregister the action ops and pernet hooks. */
static void __exit tunnel_key_cleanup_module(void)
{
	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
}
869
870module_init(tunnel_key_init_module);
871module_exit(tunnel_key_cleanup_module);
872
873MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
874MODULE_DESCRIPTION("ip tunnel manipulation actions");
875MODULE_LICENSE("GPL v2");