v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

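/*
 * GRO receive handler for ESP: parse the SPI and sequence number,
 * attach the matching inbound state to the skb's sec_path and hand the
 * packet to xfrm_input(). Returning ERR_PTR(-EINPROGRESS) signals the
 * GRO core that the skb has been consumed by the IPsec stack; on any
 * failure the skb is restored and flagged so GRO flushes it down the
 * regular receive path.
 */
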
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ip_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

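/*
 * Builds the outer ESP header during GSO: the SPI comes from the
 * state, the sequence number is the low 32 bits of the output
 * sequence counter, and the original inner protocol is saved in
 * xo->proto for the per-protocol segmentation callbacks below.
 */
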
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

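/*
 * Dispatch on the outer encapsulation mode. Relative to the v5.4
 * version further down, this adds a BEET case; unknown modes still
 * fail with -EOPNOTSUPP.
 */
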
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

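/*
 * If the packet cannot be crypto-offloaded on this device (no
 * NETIF_F_HW_ESP, or the state is bound to a different netdev), the
 * feature set handed to the inner segmentation callback is stripped
 * of scatter-gather and checksum offloads, so the software ESP
 * fallback receives linear, fully checksummed segments.
 */
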
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
				        netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

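/*
 * Transmit path: compute the ESP trailer layout (padding, the pad
 * length / next header bytes, and the ICV), fill in SPI and sequence
 * number, then either leave the crypto to capable hardware
 * (XFRM_XMIT) or run the software path via esp_output_head() and
 * esp_output_tail().
 */
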
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;
	int encap_type = 0;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (x->encap)
		encap_type = x->encap->encap_type;

	if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
		/* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
		 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
		 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
		 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
		 * Therefore, the protocol field needs to be corrected.
		 */
		ip_hdr(skb)->protocol = IPPROTO_UDP;

		esph->seq_no = htonl(seq);
	}

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
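
The trailer arithmetic in esp_xmit() above is plain ESP padding math: the ciphertext length is the payload plus two trailer bytes (pad length and next header), rounded up to the cipher block size (itself rounded up to at least 4), and the tail additionally carries the ICV. A minimal userspace sketch of that computation follows; the align_up() and esp_trailer() helpers and the example sizes are illustrative stand-ins, not part of the kernel sources.

#include <stdint.h>
#include <stdio.h>

/* Round len up to a multiple of align (a power of two), mirroring the
 * kernel's ALIGN() macro. */
static unsigned int align_up(unsigned int len, unsigned int align)
{
	return (len + align - 1) & ~(align - 1);
}

/* Hypothetical helper: compute the trailer sizes the way esp_xmit()
 * does with esp.tfclen == 0.
 * payload - bytes to encrypt (skb->len in the kernel code)
 * blksize - crypto_aead_blocksize(), before the round-up to 4
 * icv     - authenticator length (crypto_aead_authsize())
 */
static void esp_trailer(unsigned int payload, unsigned int blksize,
			unsigned int icv)
{
	unsigned int clen = align_up(payload + 2, align_up(blksize, 4));
	unsigned int plen = clen - payload;	/* padding incl. the 2 trailer bytes */
	unsigned int tailen = plen + icv;	/* bytes appended after the payload */

	printf("payload=%u clen=%u plen=%u tailen=%u\n",
	       payload, clen, plen, tailen);
}

int main(void)
{
	esp_trailer(1400, 16, 16);	/* example block/ICV sizes */
	esp_trailer(40, 1, 12);		/* short packet, 1-byte block size */
	return 0;
}
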
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out_reset;
		}
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

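/*
 * Note the differences from the v6.13.7 receive handler above: the
 * state is found with plain xfrm_state_lookup() and there is no SA
 * direction check, ESP-in-UDP is not detected here, and xfrm_input()
 * is called with the magic encap_type literal -2 rather than a real
 * encapsulation type.
 */
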
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

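/*
 * v5.4 only segments tunnel and transport mode here; the BEET case in
 * the v6.13.7 listing above does not exist yet, so BEET falls through
 * to -EOPNOTSUPP.
 */
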
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
				        netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

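/*
 * In v5.4 the hardware-offload path simply returns 0 here: there is no
 * re-added sec_path extension, no XFRM_XMIT flag, no ESP-in-UDP
 * special case, no linearization fallback, and no carry of xo->seq.hi
 * when the 32-bit sequence counter wraps; all of these appear in the
 * v6.13.7 esp_xmit() above.
 */
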
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
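
The v6.13.7 esp_xmit() extends the sequence-number handling visible at the end of this v5.4 listing: after advancing xo->seq.low by the number of GSO segments, it bumps xo->seq.hi when the low word wraps, then composes the 64-bit value as seq + (hi << 32). A minimal userspace sketch of that carry logic follows; struct esn and esn_advance() are hypothetical stand-ins for the seq.low/seq.hi pair kept in struct xfrm_offload.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the seq.low/seq.hi pair in struct xfrm_offload. */
struct esn {
	uint32_t lo;
	uint32_t hi;
};

/* Advance the counter by nsegs (gso_segs in the kernel code) and return
 * the 64-bit value, mirroring the v6.13.7 lines
 *   if (xo->seq.low < seq) xo->seq.hi++;
 *   esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
 * (the byte-order conversion is omitted here). */
static uint64_t esn_advance(struct esn *s, uint32_t nsegs)
{
	uint32_t seq = s->lo;	/* value placed in the ESP header */

	s->lo += nsegs;
	if (s->lo < seq)	/* 32-bit wrap: carry into the high word */
		s->hi++;

	return seq + ((uint64_t)s->hi << 32);
}

int main(void)
{
	struct esn s = { .lo = 0xfffffffe, .hi = 0 };

	printf("seqno=%llx\n", (unsigned long long)esn_advance(&s, 4));
	printf("lo=%x hi=%x\n", s.lo, s.hi);
	return 0;
}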