net/ipv4/esp4_offload.c (v5.9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

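/* GRO receive handler for ESP. It parses the SPI and sequence number,
 * attaches a sec_path and looks up the xfrm state if crypto hardware has
 * not already handled the packet, then feeds it to xfrm_input().
 * ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb was consumed.
 */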
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

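	/* No completed hardware crypto: take the software GRO path and
	 * look the state up by destination address, SPI and protocol.
	 */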
	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

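	/* Record the addressing details xfrm_input() needs to find the
	 * state again on the reinjected packet; xfrm_input() treats a
	 * negative encap_type as resumption from the GRO/offload path.
	 */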
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

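/* Write the outer ESP header on the GSO output path: SPI plus the low
 * 32 bits of the output sequence number. The inner protocol is saved in
 * xo->proto for the mode-specific segment handlers below.
 */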
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

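/* Mode-specific GSO handlers. Tunnel mode segments the packet as a
 * whole L2 frame; transport and BEET mode move the transport header
 * past the ESP encapsulation before calling the inner protocol's
 * gso_segment callback.
 */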
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

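/* GSO callback for ESP. It validates the SPI against the state, strips
 * the ESP header and IV, and adjusts the feature mask so packets that
 * the device cannot offload are segmented for software crypto.
 */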
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
				        netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

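	/* If the device cannot offload ESP for this packet, disable SG
	 * and checksum offload so the fallback gets linear segments; if
	 * it can, but cannot checksum inside ESP, disable only checksum
	 * offload.
	 */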
	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

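/* Transmit handler for offloaded ESP. It computes the padding and
 * trailer sizes, writes the ESP header and sequence numbers, and either
 * leaves the crypto to the device (full hardware offload) or falls back
 * to software encryption via esp_output_tail().
 */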
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

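	/* Combine the high and low halves into the 64-bit sequence
	 * number used by the crypto layer.
	 */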
	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

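/* Register with the xfrm offload core and the inet offload table so the
 * GRO/GSO machinery can find these handlers by protocol number.
 */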
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");

net/ipv4/esp4_offload.c (v6.9.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

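	/* New relative to v5.9 above: ESP packets that arrived inside a
	 * UDP encapsulation (NAT traversal, RFC 3948) are flagged so
	 * xfrm_input() knows to handle the UDP header.
	 */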
	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

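/* Changed from v5.9: rather than calling skb_mac_gso_segment(), the
 * outer ethertype is chosen from the inner mode's address family, so
 * IPv6-in-IPv4 tunnel payloads are segmented with the right callbacks.
 */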
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

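/* Same flow as in v5.9; the feature masks below additionally clear
 * NETIF_F_SCTP_CRC so SCTP CRC offload is never left enabled on the
 * fallback path.
 */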
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
				        netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

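	/* New relative to v5.9: if advancing the low half wrapped, carry
	 * into the upper 32 bits of the extended sequence number.
	 */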
	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

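	/* Also new here: re-adding the sec_path extension yields a
	 * writable copy if it was shared, and XFRM_XMIT marks the skb as
	 * already handled by ESP output.
	 */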
	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

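	/* Also new: the software fallback can produce fragments the
	 * device cannot handle; linearize the skb in that case.
	 */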
	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}

static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
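
For context, the NETIF_F_HW_ESP and NETIF_F_HW_ESP_TX_CSUM checks in both versions are driven by what the NIC driver advertises. A minimal sketch of that driver side, assuming a hypothetical driver "foo": the xfrmdev_ops hooks and feature bits are the real kernel interface, while all foo_* names are illustrative.

/* Sketch only: how a driver would opt in to ESP offload. */
static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xfrm_add_state,	/* hypothetical */
	.xdo_dev_state_delete	= foo_xfrm_del_state,	/* hypothetical */
	.xdo_dev_offload_ok	= foo_xfrm_offload_ok,	/* hypothetical */
};

static void foo_setup_esp_offload(struct net_device *netdev)
{
	/* Hook up the IPsec state add/delete callbacks. */
	netdev->xfrmdev_ops = &foo_xfrmdev_ops;

	/* Advertise ESP offload; esp_xmit() and esp4_gso_segment()
	 * test these bits before trusting the device with the crypto.
	 */
	netdev->features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
}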