v4.17: net/ipv4/esp4_offload.c

/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

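/*
 * GRO receive handler for ESP-in-IPv4: parses the SPI and sequence number,
 * attaches a secpath plus the matching xfrm state if the device has not
 * already done the crypto, and hands the packet to xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer the skb was consumed.
 * (Annotation added in this listing, not part of the upstream file.)
 */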
static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                err = secpath_set(skb);
                if (err)
                        goto out;

                if (skb->sp->len == XFRM_MAX_DEPTH)
                        goto out;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out;

                skb->sp->xvec[skb->sp->len++] = x;
                skb->sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
                        goto out;
                }
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

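/*
 * GSO encapsulation callback: rebuilds the outer ESP header on the
 * to-be-segmented skb, filling in the SPI and the low 32 bits of the
 * output sequence number, and records the inner protocol in the offload
 * context for later segmentation. (Annotation added in this listing,
 * not part of the upstream file.)
 */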
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

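/*
 * GSO segmentation for ESP: validates the SPI against the last state on
 * the secpath, strips the ESP header and IV, masks off SG/checksum
 * features when the packet cannot stay on the hardware-offload path, and
 * delegates the actual segmentation to the outer mode.
 * (Annotation added in this listing, not part of the upstream file.)
 */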
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~NETIF_F_CSUM_MASK;

        xo->flags |= XFRM_GSO_SEGMENT;

        return x->outer_mode->gso_segment(x, skb, esp_features);
}

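/*
 * Input tail: once the crypto is done (by the device or by the software
 * fallback), this finishes ESP receive processing via esp_input_done2().
 * (Annotation added in this listing, not part of the upstream file.)
 */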
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

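/*
 * Transmit-side ESP handler: computes padding and trailer sizes, writes
 * the ESP header and sequence numbers, and either returns early when the
 * device does the crypto (hw_offload) or runs the software fallback
 * through esp_output_tail(). (Annotation added in this listing, not part
 * of the upstream file.)
 */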
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev)) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload)
                return 0;

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

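/*
 * Registration tables: esp4_offload hooks ESP into the inet GRO/GSO
 * callbacks, and esp_type_offload wires the xfrm layer to the handlers
 * above. (Annotation added in this listing, not part of the upstream
 * file.)
 */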
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description    = "ESP4 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};

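/*
 * Module init/exit: init registers the xfrm type offload and then the
 * inet protocol offload for IPPROTO_ESP; exit unregisters both.
 * (Annotation added in this listing, not part of the upstream file.)
 */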
static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type offload\n", __func__);

        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
v5.14.15: net/ipv4/esp4_offload.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

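/*
 * In this version the GRO receive callback takes the list of held packets
 * as a struct list_head rather than a struct sk_buff ** (the GRO API
 * changed around v4.19), and the error paths now reset the secpath
 * explicitly via the out_reset label. (Annotation added in this listing,
 * not part of the upstream file.)
 */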
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

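/*
 * Per-encapsulation-mode GSO helpers, absent from the v4.17 version.
 * Dispatch now happens on x->outer_mode.encap in
 * xfrm4_outer_mode_gso_segment() below, replacing the old
 * x->outer_mode->gso_segment() indirection, and BEET mode has its own
 * handler. (Annotation added in this listing, not part of the upstream
 * file.)
 */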
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

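/*
 * Compared with the v4.17 version above, the feature check here also
 * honours dev->gso_partial_features, additionally masks NETIF_F_SCTP_CRC,
 * and the secpath is reached through skb_sec_path() rather than skb->sp.
 * (Annotation added in this listing, not part of the upstream file.)
 */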
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

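        /*
         * Unlike the v4.17 version, the hardware-offload path ensures a
         * secpath extension is attached and sets XFRM_XMIT to mark the skb
         * as already handled by ESP offload; only the software-fallback
         * path below runs esp_output_tail(). (Annotation added in this
         * listing, not part of the upstream file.)
         */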
        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);

        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");