// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

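/* GRO receive handler for ESP. Parse the SPI and sequence number,
 * attach a secure path and look up the input state, then hand the
 * packet to xfrm_input(). Returning ERR_PTR(-EINPROGRESS) tells the
 * GRO layer the skb has been consumed; on failure the skb is restored
 * and flushed back to the normal receive path.
 */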
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ip_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

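/* Set up the outer ESP header for a GSO packet: write the SPI and the
 * low 32 bits of the output sequence number, record the inner protocol
 * in the offload context and mark the outer IP protocol as ESP.
 */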
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

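/* Tunnel mode: the whole inner packet is the GSO payload, so segment
 * it as an Ethernet frame of the inner family (IPv4 or IPv6).
 */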
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

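/* Transport mode: skip past the ESP header and hand segmentation off
 * to the offload callbacks of the inner transport protocol.
 */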
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

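/* BEET mode: account for the BEET pseudo header (and, for IPv6
 * selectors, any extension headers) before delegating to the inner
 * protocol's gso_segment callback.
 */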
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

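/* Dispatch to the segmentation helper that matches the outer mode of
 * the state: tunnel, transport or BEET.
 */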
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

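/* GSO callback for ESP. Validate that the packet belongs to the state
 * on the secure path, strip the ESP header and IV, mask out features
 * the device cannot provide once the payload is encrypted, and segment
 * according to the outer mode.
 */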
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

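/* Finish ESP receive processing after the crypto step. If the device
 * did not do the crypto, any hardware checksum covered the still
 * encrypted payload, so reset it before esp_input_done2() completes
 * the input path.
 */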
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

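/* Transmit hook for ESP offload. Decide between real hardware offload
 * and the software crypto fallback, build the ESP trailer when the
 * packet is encrypted in software, advance the 64-bit sequence number
 * and fix up the outer IPv4 header. For full hardware offload the
 * encryption itself is left to the driver.
 */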
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;
	int encap_type = 0;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (x->encap)
		encap_type = x->encap->encap_type;

	if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
		/* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
		 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
		 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
		 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
		 * Therefore, the protocol field needs to be corrected.
		 */
		ip_hdr(skb)->protocol = IPPROTO_UDP;

		esph->seq_no = htonl(seq);
	}

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

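/* Glue: esp4_offload hooks ESP GRO/GSO into the inet offload table,
 * esp_type_offload provides the xfrm-level input/xmit/encap callbacks.
 */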
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};

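/* Register/unregister the offload handlers with the xfrm layer and the
 * inet offload table at module load and unload.
 */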
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");