v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>
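
/* GRO receive handler for ESP-in-IPv4 (and ESP-in-UDP, detected below).
 * It parses the SPI and sequence number, attaches the matching inbound
 * xfrm_state to a freshly set secpath unless the NIC already decrypted
 * the packet (CRYPTO_DONE), and feeds the skb to xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer the skb was
 * consumed; on any failure the header pull is undone and the packet is
 * flushed back to the regular receive path.
 */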
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ip_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input; it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
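
/* .encap callback: build the outer ESP header for a GSO skb on the
 * transmit side.  The SPI and the low 32 bits of the output sequence
 * number are filled in, and the original inner protocol is stashed in
 * xo->proto so the segmentation callbacks below can find the inner GSO
 * handler.  skb->mac_header is assumed here to point at the outer IP
 * header's protocol field, so writing IPPROTO_ESP through it rewrites
 * that field.
 */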
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
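
/* Tunnel mode: the payload is a complete inner IPv4/IPv6 packet, so
 * segmentation can be delegated to skb_eth_gso_segment() with the
 * ethertype chosen from the inner-mode family.
 */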
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}
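
/* Transport mode: step the transport header past the ESP header
 * (x->props.header_len) and hand the skb to the inner transport
 * protocol's own gso_segment callback, looked up via inet_offloads[].
 */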
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
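
/* BEET mode: like transport mode, but an optional pseudo header
 * (IPPROTO_BEETPH) may precede the payload, and with an IPv6 selector
 * any extension headers have to be skipped.  The transport header
 * offset and gso_type bits are adjusted accordingly before delegating
 * to the inner protocol's gso_segment callback.
 */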
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
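
/* Dispatch segmentation on the SA's outer encapsulation mode. */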
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
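
/* .gso_segment callback for IPPROTO_ESP.  After validating the SPI
 * against the last state on the secpath, the ESP header and IV are
 * pulled so only the inner packet remains.  esp_features is then
 * narrowed: if the device cannot offload ESP for this SA (or the skb
 * is leaving via a different device than the SA's offload device), SG
 * and checksum features are masked out to force a complete software
 * fallback in the inner handler.
 */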
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
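
/* .input_tail callback, run from the xfrm input path: make sure the
 * ESP header and IV are in the linear area and finish receive
 * processing via esp_input_done2().  If the hardware did not flag the
 * crypto as done (CRYPTO_DONE), the checksum state is reset first.
 */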
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}
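
/* .xmit callback: prepare an skb for ESP hardware offload, falling
 * back to software crypto (CRYPTO_FALLBACK) when the device cannot
 * handle this SA.  It computes the ESP trailer geometry, builds the
 * header for the fallback, non-GSO and ESP-in-UDP cases, advances the
 * 64-bit sequence number by the number of GSO segments, and fixes up
 * the outer IP header.  For pure hardware offload the function stops
 * after flagging XFRM_XMIT; otherwise esp_output_tail() performs the
 * software encryption.
 */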
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;
	int encap_type = 0;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (x->encap)
		encap_type = x->encap->encap_type;

	if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
		/* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
		 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
		 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
		 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
		 * Therefore, the protocol field needs to be corrected.
		 */
		ip_hdr(skb)->protocol = IPPROTO_UDP;

		esph->seq_no = htonl(seq);
	}

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}
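
/* Protocol offload hooks registered with the inet layer for IPPROTO_ESP. */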
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};
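
/* xfrm type-offload hooks; these are invoked from the generic xfrm
 * input/output paths rather than from the inet offload layer.
 */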
static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};
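
/* Module init: register both sets of hooks.  Note that a failure of
 * inet_add_offload() is returned as-is; the xfrm type offload appears
 * to be left registered in that case.
 */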
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
v6.8

[Older revision of the same file; near-identical to the v6.13.7
listing above.  The notable differences: esp4_gro_receive() used
xfrm_state_lookup() instead of xfrm_input_state_lookup() and had no
SA-direction (x->dir) check, and esp_xmit() had no encap_type /
UDP_ENCAP_ESPINUDP handling.]