// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

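/*
 * Per-packet scratch buffer.  ah_alloc_tmp() sizes one allocation that the
 * helpers below carve up as:
 *
 *	[caller data (size bytes)][ICV][ahash_request + request ctx][scatterlists]
 *
 * with each region aligned for the hash ICV, the crypto context and the
 * scatterlist array respectively.  The buffer is stashed in
 * AH_SKB_CB(skb)->tmp so the asynchronous completion handlers can find it.
 */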
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					     struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int  l = iph->ihl*4 - sizeof(struct iphdr);
	int  optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen<2 || optlen>l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			fallthrough;
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

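/*
 * Completion handler for ah_output() when the ahash request finishes
 * asynchronously: copy the truncated ICV from the scratch buffer into the
 * AH header, restore the mutable IP header fields (TOS, TTL, frag_off and
 * any options) that were saved before hashing, free the scratch buffer and
 * resume the xfrm output path.
 */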
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

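/*
 * AH output as described in RFC 4302: save the mutable IP header fields,
 * zero them (and the authentication data) so they do not contribute to the
 * ICV, hash the whole packet -- plus the high-order ESN bits when extended
 * sequence numbers are enabled -- then write the truncated ICV into the AH
 * header and restore the saved fields.
 */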
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
	if (!iph)
		goto out;
	seqhi = (__be32 *)((char *)iph + ihl);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

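	/*
	 * ah->hdrlen is the AH header length in 32-bit words minus 2
	 * (RFC 4302).  The header (fixed part plus truncated ICV) is padded
	 * to a 4-byte boundary for XFRM_STATE_ALIGN4 states and to the
	 * default 8-byte boundary otherwise.
	 */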
	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

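/*
 * Completion handler for ah_input() when the ahash request finishes
 * asynchronously: compare the recomputed ICV against the value saved from
 * the AH header using crypto_memneq() (a comparison not subject to timing
 * attacks), then restore the original IP header from the scratch copy,
 * strip the AH header and resume xfrm input processing with either the
 * next protocol number or -EBADMSG.
 */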
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

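/*
 * AH input: validate the AH header length, copy the IP header and the
 * received ICV into the scratch buffer, zero the mutable fields and the
 * authentication data, recompute the ICV over the packet (plus the ESN high
 * bits if enabled) and compare it against the received value before handing
 * the inner payload back to the stack.
 */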
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

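	/*
	 * The advertised AH length must match either the full or the
	 * truncated ICV size, padded to the state's 4- or 8-byte alignment;
	 * anything else is rejected as a malformed header.
	 */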
	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;


	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	seqhi = (__be32 *)((char *)work_iph + ihl);
	auth_data = ah_tmp_auth(seqhi, seqhi_len);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

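/*
 * ICMP error handler for IPPROTO_AH.  Only "fragmentation needed" and
 * redirect messages are acted on: the SA is looked up by destination
 * address and SPI, and the routing layer is told to update the path MTU or
 * the next hop for that flow.  ICMP_DEST_UNREACH deliberately falls through
 * to ICMP_REDIRECT once the code has been checked.
 */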
static int ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_AH);
	xfrm_state_put(x);

	return 0;
}

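/*
 * Set up an AH xfrm_state: allocate the ahash transform named by the SA's
 * authentication algorithm, program its key, and cross-check the driver's
 * digest size against the xfrm_algo description before recording the full
 * and truncated ICV lengths and the resulting AH header length.  AH has no
 * encapsulation mode, so states with x->encap set are rejected.
 */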
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

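/*
 * Glue into the xfrm framework: ah_type implements the AH transform for
 * IPv4 (with replay protection), while ah4_protocol hooks IPPROTO_AH
 * reception and the associated ICMP error handling.  Both are registered
 * from ah4_init() and torn down again in ah4_fini().
 */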
static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static struct xfrm4_protocol ah4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	ah4_rcv_cb,
	.err_handler	=	ah4_err,
	.priority	=	0,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&ah_type, AF_INET);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);