// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_output.c - Common IPsec encapsulation code.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#endif

#include "xfrm_inout.h"

static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);

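/* Make sure the skb has enough headroom for the outer headers
 * (dst->header_len plus the device's link-layer reserve) and enough
 * tailroom for dst->dev->needed_tailroom; otherwise reallocate the head
 * with pskb_expand_head().
 *
 * Hypothetical example: with 40 bytes of required headroom but only 16
 * bytes available, nhead is 24 and the head is grown by 24 bytes; when
 * both headroom and tailroom already fit, the skb is left untouched.
 */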
static int xfrm_skb_check_space(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
		- skb_headroom(skb);
	int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);

	if (nhead <= 0) {
		if (ntail <= 0)
			return 0;
		nhead = 0;
	} else if (ntail < 0)
		ntail = 0;

	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}

/* Children define the path of the packet through the
 * Linux networking stack. Thus, destinations are stackable.
 */

static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));

	skb_dst_drop(skb);
	return child;
}

/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
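/* Find the offset at which a Mobile IPv6 routing header or destination
 * options header should be inserted: walk the extension header chain
 * starting right after the IPv6 header and stop at the first header that
 * must follow the one being added, returning that offset (or a negative
 * error if the chain is malformed).
 */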
static int mip6_rthdr_offset(struct sk_buff *skb, u8 **nexthdr, int type)
{
	const unsigned char *nh = skb_network_header(skb);
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len;
	int found_rhdr = 0;

	packet_len = skb_tail_pointer(skb) - nh;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			if (type == IPPROTO_ROUTING && offset + 3 <= packet_len) {
				struct ipv6_rt_hdr *rt;

				rt = (struct ipv6_rt_hdr *)(nh + offset);
				if (rt->type != 0)
					return offset;
			}
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* A HAO option MUST NOT appear more than once.
			 * XXX: It would be better to scan to the end of the
			 * XXX: packet to check whether a HAO already exists.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
				net_dbg_ratelimited("mip6: hao exists already, override\n");
				return offset;
			}

			if (found_rhdr)
				return offset;

			break;
		default:
			return offset;
		}

		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		if (offset > IPV6_MAXPLEN)
			return -EINVAL;
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
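/* Pick the offset where the IPsec header should be inserted in an IPv6
 * packet.  For MIPv6 DSTOPTS/ROUTING states the MIPv6 rules above apply;
 * for everything else the header goes in front of the first fragmentable
 * extension header, as found by ip6_find_1stfragopt().
 */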
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
	switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		return mip6_rthdr_offset(skb, prevhdr, x->type->proto);
#endif
	default:
		break;
	}

	return ip6_find_1stfragopt(skb, prevhdr);
}
#endif

/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* Whether the inner DSCP is copied to the outer header ("DS
	 * disclosure") is controlled by XFRM_SA_XFLAG_DONT_ENCAP_DSCP.
	 */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
			    0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
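/* Add encapsulation header.
 *
 * The top IPv6 header is constructed from the SA: addresses come from
 * x->props.saddr and x->id.daddr, the traffic class and ECN bits follow
 * the same rules as in the IPv4 tunnel case above, and the hop limit is
 * taken from the child route.
 */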
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}

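/* Add encapsulation header.
 *
 * The top IPv6 header is constructed per the BEET mode draft; when the
 * inner (IPv4) header carried options, a BEET pseudo header is inserted
 * in front of the payload to describe and pad them.
 */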
static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
#endif

/* Add encapsulation header.
 *
 * On exit, the transport header will be set to the start of the
 * encapsulation header to be filled in by x->type->output and the mac
 * header will be set to the nextheader (protocol for IPv4) field of the
 * extension header directly preceding the encapsulation header, or in
 * its absence, that of the top IP header.
 * The value of the network header will always point to the top IP header
 * while skb->data will point to the payload.
 */
static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
	skb->protocol = htons(ETH_P_IP);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm4_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_encap_add(x, skb);
	}

	WARN_ON_ONCE(1);
	return -EOPNOTSUPP;
}

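/* IPv6 counterpart of xfrm4_prepare_output(); see the comment above for
 * the resulting header layout.
 */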
static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}

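/* Apply the outer mode transform for this state: dispatch on the
 * configured encapsulation mode and address family to the matching
 * BEET/tunnel/transport/route-optimization helper above.
 */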
static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NET_PKTGEN)
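/* Exported so that pktgen can exercise the outer mode output path when
 * generating IPsec test traffic.
 */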
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
#endif

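/* Apply one IPsec transform to the packet: make room for the outer
 * headers, run the outer mode transform, update the state's lifetime
 * counters and replay state under x->lock, and then hand the packet to
 * x->type->output (e.g. ESP or AH).  The loop keeps going while the dst
 * stack yields further non-tunnel states.  A hardware packet-offloaded
 * state skips straight to the resume path.
 */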
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
		goto resume;

	do {
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		err = xfrm_replay_overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}

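/* Continue output processing after an asynchronous transform (e.g. a
 * crypto callback) completed.  Each fully transformed packet is re-run
 * through the address family's local-out hook and the POST_ROUTING
 * netfilter hook; once no xfrm state is left on the dst it is handed to
 * dst_output() for transmission.
 */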
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, sk, skb);
		if (unlikely(err != 1))
			goto out;

		if (!skb_dst(skb)->xfrm)
			return dst_output(net, sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);

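/* Continuation used as the netfilter okfn and called directly from
 * xfrm_output(): (re)enter the transform loop via xfrm_output_resume().
 */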
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(sk, skb, 1);
}

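/* Software-segment a GSO packet and push each resulting segment through
 * the transform path individually; on error the remaining segments are
 * freed.
 */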
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}

/* For partial checksum offload, the outer header checksum is calculated
 * by software and the inner header checksum is calculated by hardware.
 * This requires hardware to know the inner packet type to calculate
 * the inner header checksum, so save the inner IP protocol here to avoid
 * traversing the packet in the vendor's xmit code.
 * For IPsec tunnel mode, save the IP protocol from the IP header of the
 * plain-text packet.  Otherwise, if the encap type is IPIP, just save
 * skb->inner_ipproto; in any other case take the IP protocol from the
 * inner IP header.
 */
static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	const struct ethhdr *eth;

	if (!xo)
		return;

	if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
		switch (x->outer_mode.family) {
		case AF_INET:
			xo->inner_ipproto = ip_hdr(skb)->protocol;
			break;
		case AF_INET6:
			xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		return;
	}

	/* non-Tunnel Mode */
	if (!skb->encapsulation)
		return;

	if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
		xo->inner_ipproto = skb->inner_ipproto;
		return;
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
		return;

	eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IPV6:
		xo->inner_ipproto = inner_ipv6_hdr(skb)->nexthdr;
		break;
	case ETH_P_IP:
		xo->inner_ipproto = inner_ip_hdr(skb)->protocol;
		break;
	}
}

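/* Entry point for locally generated packets that matched an IPsec policy:
 * clear the IP control block, let crypto-offload capable devices claim the
 * packet (adding a sec_path entry for them), fall back to software GSO
 * segmentation or checksumming where needed, and finally enter the
 * transform loop via xfrm_output2().  Typically reached from the address
 * family's xfrm output hook (e.g. xfrm4_output/xfrm6_output) after routing.
 */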
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	switch (x->outer_mode.family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
		if (!xfrm_dev_offload_ok(skb, x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -EHOSTUNREACH;
		}

		return xfrm_output_resume(sk, skb, 0);
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}

		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		xfrm_get_inner_ipproto(skb, x);
		skb->encapsulation = 1;

		if (skb_is_gso(skb)) {
			if (skb->inner_protocol)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);

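/* Check that a to-be-tunnelled IPv4 packet fits the path MTU.  Packets
 * with DF set that are too large trigger either a local error on the
 * socket or an ICMP fragmentation-needed message, and -EMSGSIZE is
 * returned.
 */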
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
		goto out;

	mtu = dst_mtu(skb_dst(skb));
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
		skb->protocol = htons(ETH_P_IP);

		if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}

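/* Prepare an inner IPv4 packet for encapsulation: reject fragments in
 * BEET mode, enforce the tunnel MTU check and stash the header fields
 * the mode code needs in XFRM_MODE_SKB_CB.
 */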
static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
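/* IPv6 counterpart of xfrm4_tunnel_check_size(): oversized packets get a
 * local RXPMTU notification, a local error or an ICMPv6 packet-too-big
 * message, depending on the socket, and -EMSGSIZE is returned.
 */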
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);

	if (skb->ignore_df)
		goto out;

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) &&
	     !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);

		if (xfrm6_local_dontfrag(skb->sk))
			ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
		else if (skb->sk)
			xfrm_local_error(skb, mtu);
		else
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
#endif

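/* Prepare an inner IPv6 packet for encapsulation: enforce the tunnel MTU
 * check and stash the header fields the mode code needs in
 * XFRM_MODE_SKB_CB.
 */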
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}

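/* Determine the inner address family of the packet (from the selector,
 * or from the dst family for AF_UNSPEC states) and run the matching
 * per-family extraction helper.
 */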
static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode;

	if (x->sel.family == AF_UNSPEC)
		inner_mode = xfrm_ip2inner_mode(x,
				xfrm_af2proto(skb_dst(skb)->ops->family));
	else
		inner_mode = &x->inner_mode;

	if (inner_mode == NULL)
		return -EAFNOSUPPORT;

	switch (inner_mode->family) {
	case AF_INET:
		return xfrm4_extract_output(x, skb);
	case AF_INET6:
		return xfrm6_extract_output(x, skb);
	}

	return -EAFNOSUPPORT;
}

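/* Notify the local sender that the packet could not be transmitted at
 * this MTU by invoking the per-family local_error handler (which
 * typically queues an error report on the socket).
 */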
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);