v3.1
  1/*
  2 * Copyright (C)2002 USAGI/WIDE Project
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write to the Free Software
 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 17 *
 18 * Authors
 19 *
 20 *	Mitsuru KANDA @USAGI       : IPv6 Support
 21 * 	Kazunori MIYAZAWA @USAGI   :
 22 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 23 *
 24 * 	This file is derived from net/ipv4/ah.c.
 25 */
 26
 27#include <crypto/hash.h>
 28#include <linux/module.h>
 29#include <linux/slab.h>
 30#include <net/ip.h>
 31#include <net/ah.h>
 32#include <linux/crypto.h>
 33#include <linux/pfkeyv2.h>
 34#include <linux/string.h>
 35#include <linux/scatterlist.h>
 36#include <net/icmp.h>
 37#include <net/ipv6.h>
 38#include <net/protocol.h>
 39#include <net/xfrm.h>
 40
 41#define IPV6HDR_BASELEN 8
 42
 43struct tmp_ext {
 44#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 45		struct in6_addr saddr;
 46#endif
 47		struct in6_addr daddr;
 48		char hdrs[0];
 49};
 50
 51struct ah_skb_cb {
 52	struct xfrm_skb_cb xfrm;
 53	void *tmp;
 54};
 55
 56#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 57
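/*
 * The per-packet scratch buffer allocated here holds, in order: the saved
 * header bytes ("size"), the ICV (digest-sized, aligned for the hash), the
 * ahash_request, and nfrags scatterlist entries.  The ah_tmp_*() and
 * ah_req_sg() helpers below recover pointers into the same buffer.
 */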
 58static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 59			  unsigned int size)
 60{
 61	unsigned int len;
 62
 63	len = size + crypto_ahash_digestsize(ahash) +
 64	      (crypto_ahash_alignmask(ahash) &
 65	       ~(crypto_tfm_ctx_alignment() - 1));
 66
 67	len = ALIGN(len, crypto_tfm_ctx_alignment());
 68
 69	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 70	len = ALIGN(len, __alignof__(struct scatterlist));
 71
 72	len += sizeof(struct scatterlist) * nfrags;
 73
 74	return kmalloc(len, GFP_ATOMIC);
 75}
 76
 77static inline struct tmp_ext *ah_tmp_ext(void *base)
 78{
 79	return base + IPV6HDR_BASELEN;
 80}
 81
 82static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 83{
 84	return tmp + offset;
 85}
 86
 87static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
 88			     unsigned int offset)
 89{
 90	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
 91}
 92
 93static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 94					       u8 *icv)
 95{
 96	struct ahash_request *req;
 97
 98	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
 99				crypto_tfm_ctx_alignment());
100
101	ahash_request_set_tfm(req, ahash);
102
103	return req;
104}
105
106static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
107					     struct ahash_request *req)
108{
109	return (void *)ALIGN((unsigned long)(req + 1) +
110			     crypto_ahash_reqsize(ahash),
111			     __alignof__(struct scatterlist));
112}
113
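/*
 * Walk the TLV options of a Hop-by-Hop or Destination Options header and
 * zero the data of every option whose type has the "may change en route"
 * bit (0x20) set, so the ICV is computed as if those mutable fields were
 * zero (RFC 4302).  Returns 1 if the option list parsed cleanly, 0 if it
 * is malformed.
 */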
114static int zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
115{
116	u8 *opt = (u8 *)opthdr;
117	int len = ipv6_optlen(opthdr);
118	int off = 0;
119	int optlen = 0;
120
121	off += 2;
122	len -= 2;
123
124	while (len > 0) {
125
126		switch (opt[off]) {
127
128		case IPV6_TLV_PAD0:
129			optlen = 1;
130			break;
131		default:
132			if (len < 2)
133				goto bad;
134			optlen = opt[off+1]+2;
135			if (len < optlen)
136				goto bad;
137			if (opt[off] & 0x20)
138				memset(&opt[off+2], 0, opt[off+1]);
139			break;
140		}
141
142		off += optlen;
143		len -= optlen;
144	}
145	if (len == 0)
146		return 1;
147
148bad:
149	return 0;
150}
151
152#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
153/**
154 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
155 *	@iph: IPv6 header
 156 *	@destopt: destination options header
157 */
158static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
159{
160	u8 *opt = (u8 *)destopt;
161	int len = ipv6_optlen(destopt);
162	int off = 0;
163	int optlen = 0;
164
165	off += 2;
166	len -= 2;
167
168	while (len > 0) {
169
170		switch (opt[off]) {
171
172		case IPV6_TLV_PAD0:
173			optlen = 1;
174			break;
175		default:
176			if (len < 2)
177				goto bad;
178			optlen = opt[off+1]+2;
179			if (len < optlen)
180				goto bad;
181
182			/* Rearrange the source address in @iph and the
183			 * addresses in home address option for final source.
184			 * See 11.3.2 of RFC 3775 for details.
185			 */
186			if (opt[off] == IPV6_TLV_HAO) {
187				struct in6_addr final_addr;
188				struct ipv6_destopt_hao *hao;
189
190				hao = (struct ipv6_destopt_hao *)&opt[off];
191				if (hao->length != sizeof(hao->addr)) {
192					if (net_ratelimit())
193						printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
194					goto bad;
195				}
196				ipv6_addr_copy(&final_addr, &hao->addr);
197				ipv6_addr_copy(&hao->addr, &iph->saddr);
198				ipv6_addr_copy(&iph->saddr, &final_addr);
199			}
200			break;
201		}
202
203		off += optlen;
204		len -= optlen;
205	}
206	/* Note: ok if len == 0 */
207bad:
208	return;
209}
210#else
211static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
212#endif
213
214/**
215 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
216 *	@iph: IPv6 header
217 *	@rthdr: routing header
218 *
219 *	Rearrange the destination address in @iph and the addresses in @rthdr
220 *	so that they appear in the order they will at the final destination.
221 *	See Appendix A2 of RFC 2402 for details.
222 */
223static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
224{
225	int segments, segments_left;
226	struct in6_addr *addrs;
227	struct in6_addr final_addr;
228
229	segments_left = rthdr->segments_left;
230	if (segments_left == 0)
231		return;
232	rthdr->segments_left = 0;
233
234	/* The value of rthdr->hdrlen has been verified either by the system
235	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
236	 * packets.  So we can assume that it is even and that segments is
237	 * greater than or equal to segments_left.
238	 *
239	 * For the same reason we can assume that this option is of type 0.
240	 */
241	segments = rthdr->hdrlen >> 1;
242
243	addrs = ((struct rt0_hdr *)rthdr)->addr;
244	ipv6_addr_copy(&final_addr, addrs + segments - 1);
245
246	addrs += segments - segments_left;
247	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
248
249	ipv6_addr_copy(addrs, &iph->daddr);
250	ipv6_addr_copy(&iph->daddr, &final_addr);
251}
252
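/*
 * Prepare the extension header chain for ICV calculation: zero mutable
 * option data in Hop-by-Hop and Destination Options headers, rearrange
 * the Routing header (and, on output, any Home Address option) so the
 * addresses appear as they will at the final destination, and stop at
 * the first header that is none of these.
 */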
253static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
254{
255	union {
256		struct ipv6hdr *iph;
257		struct ipv6_opt_hdr *opth;
258		struct ipv6_rt_hdr *rth;
259		char *raw;
260	} exthdr = { .iph = iph };
261	char *end = exthdr.raw + len;
262	int nexthdr = iph->nexthdr;
263
264	exthdr.iph++;
265
266	while (exthdr.raw < end) {
267		switch (nexthdr) {
268		case NEXTHDR_DEST:
269			if (dir == XFRM_POLICY_OUT)
270				ipv6_rearrange_destopt(iph, exthdr.opth);
271		case NEXTHDR_HOP:
272			if (!zero_out_mutable_opts(exthdr.opth)) {
273				LIMIT_NETDEBUG(
274					KERN_WARNING "overrun %sopts\n",
275					nexthdr == NEXTHDR_HOP ?
276						"hop" : "dest");
277				return -EINVAL;
278			}
279			break;
280
281		case NEXTHDR_ROUTING:
282			ipv6_rearrange_rthdr(iph, exthdr.rth);
283			break;
284
 285		default:
286			return 0;
287		}
288
289		nexthdr = exthdr.opth->nexthdr;
290		exthdr.raw += ipv6_optlen(exthdr.opth);
291	}
292
293	return 0;
294}
295
296static void ah6_output_done(struct crypto_async_request *base, int err)
297{
298	int extlen;
299	u8 *iph_base;
300	u8 *icv;
301	struct sk_buff *skb = base->data;
302	struct xfrm_state *x = skb_dst(skb)->xfrm;
303	struct ah_data *ahp = x->data;
304	struct ipv6hdr *top_iph = ipv6_hdr(skb);
305	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
306	struct tmp_ext *iph_ext;
307
308	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
309	if (extlen)
310		extlen += sizeof(*iph_ext);
311
312	iph_base = AH_SKB_CB(skb)->tmp;
313	iph_ext = ah_tmp_ext(iph_base);
314	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
315
316	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
317	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
318
319	if (extlen) {
320#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
321		memcpy(&top_iph->saddr, iph_ext, extlen);
322#else
323		memcpy(&top_iph->daddr, iph_ext, extlen);
324#endif
325	}
326
327	err = ah->nexthdr;
328
329	kfree(AH_SKB_CB(skb)->tmp);
330	xfrm_output_resume(skb, err);
331}
332
333static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
334{
335	int err;
336	int nfrags;
337	int extlen;
338	u8 *iph_base;
339	u8 *icv;
340	u8 nexthdr;
341	struct sk_buff *trailer;
342	struct crypto_ahash *ahash;
343	struct ahash_request *req;
344	struct scatterlist *sg;
345	struct ipv6hdr *top_iph;
346	struct ip_auth_hdr *ah;
347	struct ah_data *ahp;
348	struct tmp_ext *iph_ext;
349
350	ahp = x->data;
351	ahash = ahp->ahash;
352
353	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
354		goto out;
355	nfrags = err;
356
357	skb_push(skb, -skb_network_offset(skb));
358	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
359	if (extlen)
360		extlen += sizeof(*iph_ext);
361
362	err = -ENOMEM;
363	iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
364	if (!iph_base)
365		goto out;
366
367	iph_ext = ah_tmp_ext(iph_base);
368	icv = ah_tmp_icv(ahash, iph_ext, extlen);
369	req = ah_tmp_req(ahash, icv);
370	sg = ah_req_sg(ahash, req);
371
372	ah = ip_auth_hdr(skb);
373	memset(ah->auth_data, 0, ahp->icv_trunc_len);
374
375	top_iph = ipv6_hdr(skb);
376	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
377
378	nexthdr = *skb_mac_header(skb);
379	*skb_mac_header(skb) = IPPROTO_AH;
380
381	/* When there are no extension headers, we only need to save the first
382	 * 8 bytes of the base IP header.
383	 */
384	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
385
386	if (extlen) {
387#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
388		memcpy(iph_ext, &top_iph->saddr, extlen);
389#else
390		memcpy(iph_ext, &top_iph->daddr, extlen);
391#endif
392		err = ipv6_clear_mutable_options(top_iph,
393						 extlen - sizeof(*iph_ext) +
394						 sizeof(*top_iph),
395						 XFRM_POLICY_OUT);
396		if (err)
397			goto out_free;
398	}
399
400	ah->nexthdr = nexthdr;
401
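	/* Zero the mutable fields of the base header (traffic class, flow
	 * label, hop limit) for the ICV calculation; the copy saved in
	 * iph_base is restored once the digest has been computed.
	 */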
402	top_iph->priority    = 0;
403	top_iph->flow_lbl[0] = 0;
404	top_iph->flow_lbl[1] = 0;
405	top_iph->flow_lbl[2] = 0;
406	top_iph->hop_limit   = 0;
407
408	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
409
410	ah->reserved = 0;
411	ah->spi = x->id.spi;
412	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
413
414	sg_init_table(sg, nfrags);
415	skb_to_sgvec(skb, sg, 0, skb->len);
416
417	ahash_request_set_crypt(req, sg, icv, skb->len);
418	ahash_request_set_callback(req, 0, ah6_output_done, skb);
419
420	AH_SKB_CB(skb)->tmp = iph_base;
421
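	/* The hash may complete asynchronously: -EINPROGRESS means
	 * ah6_output_done() will restore the headers, copy in the ICV and
	 * resume output later.
	 */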
422	err = crypto_ahash_digest(req);
423	if (err) {
424		if (err == -EINPROGRESS)
425			goto out;
426
427		if (err == -EBUSY)
428			err = NET_XMIT_DROP;
429		goto out_free;
430	}
431
432	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
433	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
434
435	if (extlen) {
436#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
437		memcpy(&top_iph->saddr, iph_ext, extlen);
438#else
439		memcpy(&top_iph->daddr, iph_ext, extlen);
440#endif
441	}
442
443out_free:
444	kfree(iph_base);
445out:
446	return err;
447}
448
449static void ah6_input_done(struct crypto_async_request *base, int err)
450{
451	u8 *auth_data;
452	u8 *icv;
453	u8 *work_iph;
454	struct sk_buff *skb = base->data;
455	struct xfrm_state *x = xfrm_input_state(skb);
456	struct ah_data *ahp = x->data;
457	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
458	int hdr_len = skb_network_header_len(skb);
459	int ah_hlen = (ah->hdrlen + 2) << 2;
460
461	work_iph = AH_SKB_CB(skb)->tmp;
462	auth_data = ah_tmp_auth(work_iph, hdr_len);
463	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
464
465	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
466	if (err)
467		goto out;
468
469	skb->network_header += ah_hlen;
470	memcpy(skb_network_header(skb), work_iph, hdr_len);
471	__skb_pull(skb, ah_hlen + hdr_len);
472	skb_set_transport_header(skb, -hdr_len);
473
474	err = ah->nexthdr;
475out:
476	kfree(AH_SKB_CB(skb)->tmp);
477	xfrm_input_resume(skb, err);
478}
479
480
481
482static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
483{
 484	/*
 485	 * Before AH processing:
 486	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
 487	 * |<-------------->| hdr_len
 488	 *
 489	 * To erase AH:
 490	 * Keep a copy of the cleared headers.  After AH processing, advance
 491	 * skb->network_header with skb_pull() by the AH header length, then
 492	 * copy the saved hdr_len bytes back in front of it.
 493	 * If a destination options header follows AH, it ends up after [Ext2].
 494	 *
 495	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
 496	 * Afterwards an AH-sized gap is left in front of the IPv6 header.
 497	 */
498
499	u8 *auth_data;
500	u8 *icv;
501	u8 *work_iph;
502	struct sk_buff *trailer;
503	struct crypto_ahash *ahash;
504	struct ahash_request *req;
505	struct scatterlist *sg;
506	struct ip_auth_hdr *ah;
507	struct ipv6hdr *ip6h;
508	struct ah_data *ahp;
509	u16 hdr_len;
510	u16 ah_hlen;
511	int nexthdr;
512	int nfrags;
513	int err = -ENOMEM;
514
515	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
516		goto out;
517
518	/* We are going to _remove_ AH header to keep sockets happy,
519	 * so... Later this can change. */
520	if (skb_cloned(skb) &&
521	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
522		goto out;
523
524	skb->ip_summed = CHECKSUM_NONE;
525
526	hdr_len = skb_network_header_len(skb);
527	ah = (struct ip_auth_hdr *)skb->data;
528	ahp = x->data;
529	ahash = ahp->ahash;
530
531	nexthdr = ah->nexthdr;
532	ah_hlen = (ah->hdrlen + 2) << 2;
533
534	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
535	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
536		goto out;
537
538	if (!pskb_may_pull(skb, ah_hlen))
539		goto out;
540
541
542	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
543		goto out;
544	nfrags = err;
545
546	ah = (struct ip_auth_hdr *)skb->data;
547	ip6h = ipv6_hdr(skb);
548
549	skb_push(skb, hdr_len);
550
551	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
552	if (!work_iph)
553		goto out;
554
555	auth_data = ah_tmp_auth(work_iph, hdr_len);
556	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
557	req = ah_tmp_req(ahash, icv);
558	sg = ah_req_sg(ahash, req);
559
560	memcpy(work_iph, ip6h, hdr_len);
561	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
562	memset(ah->auth_data, 0, ahp->icv_trunc_len);
563
564	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
565		goto out_free;
566
567	ip6h->priority    = 0;
568	ip6h->flow_lbl[0] = 0;
569	ip6h->flow_lbl[1] = 0;
570	ip6h->flow_lbl[2] = 0;
571	ip6h->hop_limit   = 0;
572
573	sg_init_table(sg, nfrags);
574	skb_to_sgvec(skb, sg, 0, skb->len);
575
576	ahash_request_set_crypt(req, sg, icv, skb->len);
577	ahash_request_set_callback(req, 0, ah6_input_done, skb);
578
579	AH_SKB_CB(skb)->tmp = work_iph;
580
581	err = crypto_ahash_digest(req);
582	if (err) {
583		if (err == -EINPROGRESS)
584			goto out;
585
586		if (err == -EBUSY)
587			err = NET_XMIT_DROP;
588		goto out_free;
589	}
590
591	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
592	if (err)
593		goto out_free;
594
595	skb->network_header += ah_hlen;
596	memcpy(skb_network_header(skb), work_iph, hdr_len);
597	skb->transport_header = skb->network_header;
598	__skb_pull(skb, ah_hlen + hdr_len);
599
600	err = nexthdr;
601
602out_free:
603	kfree(work_iph);
604out:
605	return err;
606}
607
608static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
609		    u8 type, u8 code, int offset, __be32 info)
610{
611	struct net *net = dev_net(skb->dev);
612	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
613	struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
614	struct xfrm_state *x;
615
616	if (type != ICMPV6_DEST_UNREACH &&
617	    type != ICMPV6_PKT_TOOBIG)
618		return;
619
620	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
621	if (!x)
622		return;
623
624	NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
625		 ntohl(ah->spi), &iph->daddr);
626
627	xfrm_state_put(x);
628}
629
630static int ah6_init_state(struct xfrm_state *x)
631{
632	struct ah_data *ahp = NULL;
633	struct xfrm_algo_desc *aalg_desc;
634	struct crypto_ahash *ahash;
635
636	if (!x->aalg)
637		goto error;
638
639	if (x->encap)
640		goto error;
641
642	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
643	if (ahp == NULL)
644		return -ENOMEM;
645
646	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
647	if (IS_ERR(ahash))
648		goto error;
649
650	ahp->ahash = ahash;
651	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
652			       (x->aalg->alg_key_len + 7) / 8))
653		goto error;
654
655	/*
656	 * Lookup the algorithm description maintained by xfrm_algo,
657	 * verify crypto transform properties, and store information
658	 * we need for AH processing.  This lookup cannot fail here
659	 * after a successful crypto_alloc_hash().
660	 */
661	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
662	BUG_ON(!aalg_desc);
663
664	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
665	    crypto_ahash_digestsize(ahash)) {
666		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
667		       x->aalg->alg_name, crypto_ahash_digestsize(ahash),
668		       aalg_desc->uinfo.auth.icv_fullbits/8);
669		goto error;
670	}
671
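	/* AH carries a truncated ICV: icv_full_len is the raw digest size of
	 * the hash, icv_trunc_len is the number of bytes actually sent on the
	 * wire (e.g. 12 bytes for HMAC-SHA1-96).
	 */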
672	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
673	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
674
675	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
676
677	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
678					  ahp->icv_trunc_len);
679	switch (x->props.mode) {
680	case XFRM_MODE_BEET:
681	case XFRM_MODE_TRANSPORT:
682		break;
683	case XFRM_MODE_TUNNEL:
684		x->props.header_len += sizeof(struct ipv6hdr);
685		break;
686	default:
687		goto error;
688	}
689	x->data = ahp;
690
691	return 0;
692
693error:
694	if (ahp) {
695		crypto_free_ahash(ahp->ahash);
696		kfree(ahp);
697	}
698	return -EINVAL;
699}
700
701static void ah6_destroy(struct xfrm_state *x)
702{
703	struct ah_data *ahp = x->data;
704
705	if (!ahp)
706		return;
707
708	crypto_free_ahash(ahp->ahash);
709	kfree(ahp);
710}
711
712static const struct xfrm_type ah6_type =
713{
714	.description	= "AH6",
715	.owner		= THIS_MODULE,
716	.proto	     	= IPPROTO_AH,
717	.flags		= XFRM_TYPE_REPLAY_PROT,
718	.init_state	= ah6_init_state,
719	.destructor	= ah6_destroy,
720	.input		= ah6_input,
721	.output		= ah6_output,
722	.hdr_offset	= xfrm6_find_1stfragopt,
723};
724
725static const struct inet6_protocol ah6_protocol = {
726	.handler	=	xfrm6_rcv,
727	.err_handler	=	ah6_err,
728	.flags		=	INET6_PROTO_NOPOLICY,
729};
730
731static int __init ah6_init(void)
732{
733	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
734		printk(KERN_INFO "ipv6 ah init: can't add xfrm type\n");
735		return -EAGAIN;
736	}
737
738	if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
739		printk(KERN_INFO "ipv6 ah init: can't add protocol\n");
740		xfrm_unregister_type(&ah6_type, AF_INET6);
741		return -EAGAIN;
742	}
743
744	return 0;
745}
746
747static void __exit ah6_fini(void)
748{
749	if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
750		printk(KERN_INFO "ipv6 ah close: can't remove protocol\n");
751
752	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
753		printk(KERN_INFO "ipv6 ah close: can't remove xfrm type\n");
754
755}
756
757module_init(ah6_init);
758module_exit(ah6_fini);
759
760MODULE_LICENSE("GPL");
761MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C)2002 USAGI/WIDE Project
  4 *
  5 * Authors
  6 *
  7 *	Mitsuru KANDA @USAGI       : IPv6 Support
  8 *	Kazunori MIYAZAWA @USAGI   :
  9 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 10 *
 11 *	This file is derived from net/ipv4/ah.c.
 12 */
 13
 14#define pr_fmt(fmt) "IPv6: " fmt
 15
 16#include <crypto/algapi.h>
 17#include <crypto/hash.h>
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <net/ip.h>
 21#include <net/ah.h>
 22#include <linux/crypto.h>
 23#include <linux/pfkeyv2.h>
 24#include <linux/string.h>
 25#include <linux/scatterlist.h>
 26#include <net/ip6_route.h>
 27#include <net/icmp.h>
 28#include <net/ipv6.h>
 29#include <net/protocol.h>
 30#include <net/xfrm.h>
 31
 32#define IPV6HDR_BASELEN 8
 33
 34struct tmp_ext {
 35#if IS_ENABLED(CONFIG_IPV6_MIP6)
 36		struct in6_addr saddr;
 37#endif
 38		struct in6_addr daddr;
 39		char hdrs[];
 40};
 41
 42struct ah_skb_cb {
 43	struct xfrm_skb_cb xfrm;
 44	void *tmp;
 45};
 46
 47#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 48
 49static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 50			  unsigned int size)
 51{
 52	unsigned int len;
 53
 54	len = size + crypto_ahash_digestsize(ahash) +
 55	      (crypto_ahash_alignmask(ahash) &
 56	       ~(crypto_tfm_ctx_alignment() - 1));
 57
 58	len = ALIGN(len, crypto_tfm_ctx_alignment());
 59
 60	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 61	len = ALIGN(len, __alignof__(struct scatterlist));
 62
 63	len += sizeof(struct scatterlist) * nfrags;
 64
 65	return kmalloc(len, GFP_ATOMIC);
 66}
 67
 68static inline struct tmp_ext *ah_tmp_ext(void *base)
 69{
 70	return base + IPV6HDR_BASELEN;
 71}
 72
 73static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 74{
 75	return tmp + offset;
 76}
 77
 78static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
 79			     unsigned int offset)
 80{
 81	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
 82}
 83
 84static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 85					       u8 *icv)
 86{
 87	struct ahash_request *req;
 88
 89	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
 90				crypto_tfm_ctx_alignment());
 91
 92	ahash_request_set_tfm(req, ahash);
 93
 94	return req;
 95}
 96
 97static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
 98					     struct ahash_request *req)
 99{
100	return (void *)ALIGN((unsigned long)(req + 1) +
101			     crypto_ahash_reqsize(ahash),
102			     __alignof__(struct scatterlist));
103}
104
105static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
106{
107	u8 *opt = (u8 *)opthdr;
108	int len = ipv6_optlen(opthdr);
109	int off = 0;
110	int optlen = 0;
111
112	off += 2;
113	len -= 2;
114
115	while (len > 0) {
116
117		switch (opt[off]) {
118
119		case IPV6_TLV_PAD1:
120			optlen = 1;
121			break;
122		default:
123			if (len < 2)
124				goto bad;
125			optlen = opt[off+1]+2;
126			if (len < optlen)
127				goto bad;
128			if (opt[off] & 0x20)
129				memset(&opt[off+2], 0, opt[off+1]);
130			break;
131		}
132
133		off += optlen;
134		len -= optlen;
135	}
136	if (len == 0)
137		return true;
138
139bad:
140	return false;
141}
142
143#if IS_ENABLED(CONFIG_IPV6_MIP6)
144/**
145 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
146 *	@iph: IPv6 header
 147 *	@destopt: destination options header
148 */
149static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
150{
151	u8 *opt = (u8 *)destopt;
152	int len = ipv6_optlen(destopt);
153	int off = 0;
154	int optlen = 0;
155
156	off += 2;
157	len -= 2;
158
159	while (len > 0) {
160
161		switch (opt[off]) {
162
163		case IPV6_TLV_PAD1:
164			optlen = 1;
165			break;
166		default:
167			if (len < 2)
168				goto bad;
169			optlen = opt[off+1]+2;
170			if (len < optlen)
171				goto bad;
172
173			/* Rearrange the source address in @iph and the
174			 * addresses in home address option for final source.
175			 * See 11.3.2 of RFC 3775 for details.
176			 */
177			if (opt[off] == IPV6_TLV_HAO) {
178				struct in6_addr final_addr;
179				struct ipv6_destopt_hao *hao;
180
181				hao = (struct ipv6_destopt_hao *)&opt[off];
182				if (hao->length != sizeof(hao->addr)) {
183					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
184							     hao->length);
185					goto bad;
186				}
187				final_addr = hao->addr;
188				hao->addr = iph->saddr;
189				iph->saddr = final_addr;
190			}
191			break;
192		}
193
194		off += optlen;
195		len -= optlen;
196	}
197	/* Note: ok if len == 0 */
198bad:
199	return;
200}
201#else
202static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
203#endif
204
205/**
206 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
207 *	@iph: IPv6 header
208 *	@rthdr: routing header
209 *
210 *	Rearrange the destination address in @iph and the addresses in @rthdr
211 *	so that they appear in the order they will at the final destination.
212 *	See Appendix A2 of RFC 2402 for details.
213 */
214static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
215{
216	int segments, segments_left;
217	struct in6_addr *addrs;
218	struct in6_addr final_addr;
219
220	segments_left = rthdr->segments_left;
221	if (segments_left == 0)
222		return;
223	rthdr->segments_left = 0;
224
225	/* The value of rthdr->hdrlen has been verified either by the system
226	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
227	 * packets.  So we can assume that it is even and that segments is
228	 * greater than or equal to segments_left.
229	 *
230	 * For the same reason we can assume that this option is of type 0.
231	 */
232	segments = rthdr->hdrlen >> 1;
233
234	addrs = ((struct rt0_hdr *)rthdr)->addr;
235	final_addr = addrs[segments - 1];
236
237	addrs += segments - segments_left;
238	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
239
240	addrs[0] = iph->daddr;
241	iph->daddr = final_addr;
242}
243
244static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
245{
246	union {
247		struct ipv6hdr *iph;
248		struct ipv6_opt_hdr *opth;
249		struct ipv6_rt_hdr *rth;
250		char *raw;
251	} exthdr = { .iph = iph };
252	char *end = exthdr.raw + len;
253	int nexthdr = iph->nexthdr;
254
255	exthdr.iph++;
256
257	while (exthdr.raw < end) {
258		switch (nexthdr) {
259		case NEXTHDR_DEST:
260			if (dir == XFRM_POLICY_OUT)
261				ipv6_rearrange_destopt(iph, exthdr.opth);
262			fallthrough;
263		case NEXTHDR_HOP:
264			if (!zero_out_mutable_opts(exthdr.opth)) {
265				net_dbg_ratelimited("overrun %sopts\n",
266						    nexthdr == NEXTHDR_HOP ?
267						    "hop" : "dest");
268				return -EINVAL;
269			}
270			break;
271
272		case NEXTHDR_ROUTING:
273			ipv6_rearrange_rthdr(iph, exthdr.rth);
274			break;
275
276		default:
277			return 0;
278		}
279
280		nexthdr = exthdr.opth->nexthdr;
281		exthdr.raw += ipv6_optlen(exthdr.opth);
282	}
283
284	return 0;
285}
286
287static void ah6_output_done(struct crypto_async_request *base, int err)
288{
289	int extlen;
290	u8 *iph_base;
291	u8 *icv;
292	struct sk_buff *skb = base->data;
293	struct xfrm_state *x = skb_dst(skb)->xfrm;
294	struct ah_data *ahp = x->data;
295	struct ipv6hdr *top_iph = ipv6_hdr(skb);
296	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
297	struct tmp_ext *iph_ext;
298
299	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
300	if (extlen)
301		extlen += sizeof(*iph_ext);
302
303	iph_base = AH_SKB_CB(skb)->tmp;
304	iph_ext = ah_tmp_ext(iph_base);
305	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
306
307	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
308	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
309
310	if (extlen) {
311#if IS_ENABLED(CONFIG_IPV6_MIP6)
312		memcpy(&top_iph->saddr, iph_ext, extlen);
313#else
314		memcpy(&top_iph->daddr, iph_ext, extlen);
315#endif
316	}
317
318	kfree(AH_SKB_CB(skb)->tmp);
319	xfrm_output_resume(skb, err);
320}
321
322static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
323{
324	int err;
325	int nfrags;
326	int extlen;
327	u8 *iph_base;
328	u8 *icv;
329	u8 nexthdr;
330	struct sk_buff *trailer;
331	struct crypto_ahash *ahash;
332	struct ahash_request *req;
333	struct scatterlist *sg;
334	struct ipv6hdr *top_iph;
335	struct ip_auth_hdr *ah;
336	struct ah_data *ahp;
337	struct tmp_ext *iph_ext;
338	int seqhi_len = 0;
339	__be32 *seqhi;
340	int sglists = 0;
341	struct scatterlist *seqhisg;
342
343	ahp = x->data;
344	ahash = ahp->ahash;
345
346	err = skb_cow_data(skb, 0, &trailer);
347	if (err < 0)
348		goto out;
349	nfrags = err;
350
351	skb_push(skb, -skb_network_offset(skb));
352	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
353	if (extlen)
354		extlen += sizeof(*iph_ext);
355
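	/* With extended sequence numbers (ESN) the high-order 32 bits of the
	 * sequence number are not carried in the AH header but must still be
	 * covered by the ICV, so reserve room for them and hash them through
	 * an extra scatterlist entry appended after the packet data.
	 */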
356	if (x->props.flags & XFRM_STATE_ESN) {
357		sglists = 1;
358		seqhi_len = sizeof(*seqhi);
359	}
360	err = -ENOMEM;
361	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
362				extlen + seqhi_len);
363	if (!iph_base)
364		goto out;
365
366	iph_ext = ah_tmp_ext(iph_base);
367	seqhi = (__be32 *)((char *)iph_ext + extlen);
368	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
369	req = ah_tmp_req(ahash, icv);
370	sg = ah_req_sg(ahash, req);
371	seqhisg = sg + nfrags;
372
373	ah = ip_auth_hdr(skb);
374	memset(ah->auth_data, 0, ahp->icv_trunc_len);
375
376	top_iph = ipv6_hdr(skb);
377	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
378
379	nexthdr = *skb_mac_header(skb);
380	*skb_mac_header(skb) = IPPROTO_AH;
381
382	/* When there are no extension headers, we only need to save the first
383	 * 8 bytes of the base IP header.
384	 */
385	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
386
387	if (extlen) {
388#if IS_ENABLED(CONFIG_IPV6_MIP6)
389		memcpy(iph_ext, &top_iph->saddr, extlen);
390#else
391		memcpy(iph_ext, &top_iph->daddr, extlen);
392#endif
393		err = ipv6_clear_mutable_options(top_iph,
394						 extlen - sizeof(*iph_ext) +
395						 sizeof(*top_iph),
396						 XFRM_POLICY_OUT);
397		if (err)
398			goto out_free;
399	}
400
401	ah->nexthdr = nexthdr;
402
403	top_iph->priority    = 0;
404	top_iph->flow_lbl[0] = 0;
405	top_iph->flow_lbl[1] = 0;
406	top_iph->flow_lbl[2] = 0;
407	top_iph->hop_limit   = 0;
408
409	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
410
411	ah->reserved = 0;
412	ah->spi = x->id.spi;
413	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
414
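	/* skb_to_sgvec_nomark() fills the scatterlist without setting the
	 * end marker, so the seqhi entry can still be appended when ESN is
	 * in use.
	 */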
415	sg_init_table(sg, nfrags + sglists);
416	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
417	if (unlikely(err < 0))
418		goto out_free;
419
420	if (x->props.flags & XFRM_STATE_ESN) {
421		/* Attach seqhi sg right after packet payload */
422		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
423		sg_set_buf(seqhisg, seqhi, seqhi_len);
424	}
425	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
426	ahash_request_set_callback(req, 0, ah6_output_done, skb);
427
428	AH_SKB_CB(skb)->tmp = iph_base;
429
430	err = crypto_ahash_digest(req);
431	if (err) {
432		if (err == -EINPROGRESS)
433			goto out;
434
435		if (err == -ENOSPC)
436			err = NET_XMIT_DROP;
437		goto out_free;
438	}
439
440	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
441	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
442
443	if (extlen) {
444#if IS_ENABLED(CONFIG_IPV6_MIP6)
445		memcpy(&top_iph->saddr, iph_ext, extlen);
446#else
447		memcpy(&top_iph->daddr, iph_ext, extlen);
448#endif
449	}
450
451out_free:
452	kfree(iph_base);
453out:
454	return err;
455}
456
457static void ah6_input_done(struct crypto_async_request *base, int err)
458{
459	u8 *auth_data;
460	u8 *icv;
461	u8 *work_iph;
462	struct sk_buff *skb = base->data;
463	struct xfrm_state *x = xfrm_input_state(skb);
464	struct ah_data *ahp = x->data;
465	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
466	int hdr_len = skb_network_header_len(skb);
467	int ah_hlen = ipv6_authlen(ah);
468
469	if (err)
470		goto out;
471
472	work_iph = AH_SKB_CB(skb)->tmp;
473	auth_data = ah_tmp_auth(work_iph, hdr_len);
474	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
475
476	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
477	if (err)
478		goto out;
479
480	err = ah->nexthdr;
481
482	skb->network_header += ah_hlen;
483	memcpy(skb_network_header(skb), work_iph, hdr_len);
484	__skb_pull(skb, ah_hlen + hdr_len);
485	if (x->props.mode == XFRM_MODE_TUNNEL)
486		skb_reset_transport_header(skb);
487	else
488		skb_set_transport_header(skb, -hdr_len);
489out:
490	kfree(AH_SKB_CB(skb)->tmp);
491	xfrm_input_resume(skb, err);
492}
493
494
495
496static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
497{
 498	/*
 499	 * Before AH processing:
 500	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
 501	 * |<-------------->| hdr_len
 502	 *
 503	 * To erase AH:
 504	 * Keep a copy of the cleared headers.  After AH processing, advance
 505	 * skb->network_header with skb_pull() by the AH header length, then
 506	 * copy the saved hdr_len bytes back in front of it.
 507	 * If a destination options header follows AH, it ends up after [Ext2].
 508	 *
 509	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
 510	 * Afterwards an AH-sized gap is left in front of the IPv6 header.
 511	 */
512
513	u8 *auth_data;
514	u8 *icv;
515	u8 *work_iph;
516	struct sk_buff *trailer;
517	struct crypto_ahash *ahash;
518	struct ahash_request *req;
519	struct scatterlist *sg;
520	struct ip_auth_hdr *ah;
521	struct ipv6hdr *ip6h;
522	struct ah_data *ahp;
523	u16 hdr_len;
524	u16 ah_hlen;
525	int nexthdr;
526	int nfrags;
527	int err = -ENOMEM;
528	int seqhi_len = 0;
529	__be32 *seqhi;
530	int sglists = 0;
531	struct scatterlist *seqhisg;
532
533	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
534		goto out;
535
536	/* We are going to _remove_ AH header to keep sockets happy,
537	 * so... Later this can change. */
538	if (skb_unclone(skb, GFP_ATOMIC))
539		goto out;
540
541	skb->ip_summed = CHECKSUM_NONE;
542
543	hdr_len = skb_network_header_len(skb);
544	ah = (struct ip_auth_hdr *)skb->data;
545	ahp = x->data;
546	ahash = ahp->ahash;
547
548	nexthdr = ah->nexthdr;
549	ah_hlen = ipv6_authlen(ah);
550
551	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
552	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
553		goto out;
554
555	if (!pskb_may_pull(skb, ah_hlen))
556		goto out;
557
558	err = skb_cow_data(skb, 0, &trailer);
559	if (err < 0)
560		goto out;
561	nfrags = err;
562
563	ah = (struct ip_auth_hdr *)skb->data;
564	ip6h = ipv6_hdr(skb);
565
566	skb_push(skb, hdr_len);
567
568	if (x->props.flags & XFRM_STATE_ESN) {
569		sglists = 1;
570		seqhi_len = sizeof(*seqhi);
571	}
572
573	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
574				ahp->icv_trunc_len + seqhi_len);
575	if (!work_iph) {
576		err = -ENOMEM;
577		goto out;
578	}
579
580	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
581	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
582	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
583	req = ah_tmp_req(ahash, icv);
584	sg = ah_req_sg(ahash, req);
585	seqhisg = sg + nfrags;
586
587	memcpy(work_iph, ip6h, hdr_len);
588	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
589	memset(ah->auth_data, 0, ahp->icv_trunc_len);
590
591	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
592		goto out_free;
593
594	ip6h->priority    = 0;
595	ip6h->flow_lbl[0] = 0;
596	ip6h->flow_lbl[1] = 0;
597	ip6h->flow_lbl[2] = 0;
598	ip6h->hop_limit   = 0;
599
600	sg_init_table(sg, nfrags + sglists);
601	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
602	if (unlikely(err < 0))
603		goto out_free;
604
605	if (x->props.flags & XFRM_STATE_ESN) {
606		/* Attach seqhi sg right after packet payload */
607		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
608		sg_set_buf(seqhisg, seqhi, seqhi_len);
609	}
610
611	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
612	ahash_request_set_callback(req, 0, ah6_input_done, skb);
613
614	AH_SKB_CB(skb)->tmp = work_iph;
615
616	err = crypto_ahash_digest(req);
617	if (err) {
618		if (err == -EINPROGRESS)
619			goto out;
620
621		goto out_free;
622	}
623
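	/* crypto_memneq() compares the received and computed ICVs in constant
	 * time, so verification failures leak no timing information.
	 */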
624	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
625	if (err)
626		goto out_free;
627
628	skb->network_header += ah_hlen;
629	memcpy(skb_network_header(skb), work_iph, hdr_len);
630	__skb_pull(skb, ah_hlen + hdr_len);
631
632	if (x->props.mode == XFRM_MODE_TUNNEL)
633		skb_reset_transport_header(skb);
634	else
635		skb_set_transport_header(skb, -hdr_len);
636
637	err = nexthdr;
638
639out_free:
640	kfree(work_iph);
641out:
642	return err;
643}
644
645static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
646		   u8 type, u8 code, int offset, __be32 info)
647{
648	struct net *net = dev_net(skb->dev);
649	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
650	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
651	struct xfrm_state *x;
652
653	if (type != ICMPV6_PKT_TOOBIG &&
654	    type != NDISC_REDIRECT)
655		return 0;
656
657	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
658	if (!x)
659		return 0;
660
661	if (type == NDISC_REDIRECT)
662		ip6_redirect(skb, net, skb->dev->ifindex, 0,
663			     sock_net_uid(net, NULL));
664	else
665		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
666	xfrm_state_put(x);
667
668	return 0;
669}
670
671static int ah6_init_state(struct xfrm_state *x)
672{
673	struct ah_data *ahp = NULL;
674	struct xfrm_algo_desc *aalg_desc;
675	struct crypto_ahash *ahash;
676
677	if (!x->aalg)
678		goto error;
679
680	if (x->encap)
681		goto error;
682
683	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
684	if (!ahp)
685		return -ENOMEM;
686
687	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
688	if (IS_ERR(ahash))
689		goto error;
690
691	ahp->ahash = ahash;
692	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
693			       (x->aalg->alg_key_len + 7) / 8))
694		goto error;
695
696	/*
697	 * Lookup the algorithm description maintained by xfrm_algo,
698	 * verify crypto transform properties, and store information
699	 * we need for AH processing.  This lookup cannot fail here
700	 * after a successful crypto_alloc_hash().
701	 */
702	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
703	BUG_ON(!aalg_desc);
704
705	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
706	    crypto_ahash_digestsize(ahash)) {
707		pr_info("AH: %s digestsize %u != %hu\n",
708			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
709			aalg_desc->uinfo.auth.icv_fullbits/8);
710		goto error;
711	}
712
713	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
714	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
715
716	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
717					  ahp->icv_trunc_len);
718	switch (x->props.mode) {
719	case XFRM_MODE_BEET:
720	case XFRM_MODE_TRANSPORT:
721		break;
722	case XFRM_MODE_TUNNEL:
723		x->props.header_len += sizeof(struct ipv6hdr);
724		break;
725	default:
726		goto error;
727	}
728	x->data = ahp;
729
730	return 0;
731
732error:
733	if (ahp) {
734		crypto_free_ahash(ahp->ahash);
735		kfree(ahp);
736	}
737	return -EINVAL;
738}
739
740static void ah6_destroy(struct xfrm_state *x)
741{
742	struct ah_data *ahp = x->data;
743
744	if (!ahp)
745		return;
746
747	crypto_free_ahash(ahp->ahash);
748	kfree(ahp);
749}
750
751static int ah6_rcv_cb(struct sk_buff *skb, int err)
752{
753	return 0;
754}
755
756static const struct xfrm_type ah6_type = {
757	.description	= "AH6",
758	.owner		= THIS_MODULE,
759	.proto		= IPPROTO_AH,
760	.flags		= XFRM_TYPE_REPLAY_PROT,
761	.init_state	= ah6_init_state,
762	.destructor	= ah6_destroy,
763	.input		= ah6_input,
764	.output		= ah6_output,
765	.hdr_offset	= xfrm6_find_1stfragopt,
766};
767
768static struct xfrm6_protocol ah6_protocol = {
769	.handler	=	xfrm6_rcv,
770	.input_handler	=	xfrm_input,
771	.cb_handler	=	ah6_rcv_cb,
772	.err_handler	=	ah6_err,
773	.priority	=	0,
774};
775
776static int __init ah6_init(void)
777{
778	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
779		pr_info("%s: can't add xfrm type\n", __func__);
780		return -EAGAIN;
781	}
782
783	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
784		pr_info("%s: can't add protocol\n", __func__);
785		xfrm_unregister_type(&ah6_type, AF_INET6);
786		return -EAGAIN;
787	}
788
789	return 0;
790}
791
792static void __exit ah6_fini(void)
793{
794	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
795		pr_info("%s: can't remove protocol\n", __func__);
796
797	xfrm_unregister_type(&ah6_type, AF_INET6);
798}
799
800module_init(ah6_init);
801module_exit(ah6_fini);
802
803MODULE_LICENSE("GPL");
804MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);