Linux v3.5.6:
 
  1/*
  2 * Copyright (C)2002 USAGI/WIDE Project
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write to the Free Software
 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 17 *
 18 * Authors
 19 *
 20 *	Mitsuru KANDA @USAGI       : IPv6 Support
 21 * 	Kazunori MIYAZAWA @USAGI   :
 22 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 23 *
 24 * 	This file is derived from net/ipv4/ah.c.
 25 */
 26
 27#define pr_fmt(fmt) "IPv6: " fmt
 28
 29#include <crypto/hash.h>
 30#include <linux/module.h>
 31#include <linux/slab.h>
 32#include <net/ip.h>
 33#include <net/ah.h>
 34#include <linux/crypto.h>
 35#include <linux/pfkeyv2.h>
 36#include <linux/string.h>
 37#include <linux/scatterlist.h>
 38#include <net/icmp.h>
 39#include <net/ipv6.h>
 40#include <net/protocol.h>
 41#include <net/xfrm.h>
 42
 43#define IPV6HDR_BASELEN 8
 44
 45struct tmp_ext {
 46#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 47		struct in6_addr saddr;
 48#endif
 49		struct in6_addr daddr;
 50		char hdrs[0];
 51};
 52
 53struct ah_skb_cb {
 54	struct xfrm_skb_cb xfrm;
 55	void *tmp;
 56};
 57
 58#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 59
 60static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 61			  unsigned int size)
 62{
 63	unsigned int len;
 64
 65	len = size + crypto_ahash_digestsize(ahash) +
 66	      (crypto_ahash_alignmask(ahash) &
 67	       ~(crypto_tfm_ctx_alignment() - 1));
 68
 69	len = ALIGN(len, crypto_tfm_ctx_alignment());
 70
 71	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 72	len = ALIGN(len, __alignof__(struct scatterlist));
 73
 74	len += sizeof(struct scatterlist) * nfrags;
 75
 76	return kmalloc(len, GFP_ATOMIC);
 77}
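/*
 * Layout of the scratch buffer returned by ah_alloc_tmp(), in outline:
 *
 *   [ size bytes of saved header data | ICV | struct ahash_request + request ctx | nfrags * struct scatterlist ]
 *
 * with each region aligned as the ahash transform requires.  The helpers
 * below (ah_tmp_ext, ah_tmp_auth, ah_tmp_icv, ah_tmp_req, ah_req_sg) only
 * compute pointers into this single allocation.
 */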
 78
 79static inline struct tmp_ext *ah_tmp_ext(void *base)
 80{
 81	return base + IPV6HDR_BASELEN;
 82}
 83
 84static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 85{
 86	return tmp + offset;
 87}
 88
 89static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
 90			     unsigned int offset)
 91{
 92	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
 93}
 94
 95static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 96					       u8 *icv)
 97{
 98	struct ahash_request *req;
 99
100	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
101				crypto_tfm_ctx_alignment());
102
103	ahash_request_set_tfm(req, ahash);
104
105	return req;
106}
107
108static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
109					     struct ahash_request *req)
110{
111	return (void *)ALIGN((unsigned long)(req + 1) +
112			     crypto_ahash_reqsize(ahash),
113			     __alignof__(struct scatterlist));
114}
115
116static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
117{
118	u8 *opt = (u8 *)opthdr;
119	int len = ipv6_optlen(opthdr);
120	int off = 0;
121	int optlen = 0;
122
123	off += 2;
124	len -= 2;
125
126	while (len > 0) {
127
128		switch (opt[off]) {
129
130		case IPV6_TLV_PAD1:
131			optlen = 1;
132			break;
133		default:
134			if (len < 2)
135				goto bad;
136			optlen = opt[off+1]+2;
137			if (len < optlen)
138				goto bad;
139			if (opt[off] & 0x20)
140				memset(&opt[off+2], 0, opt[off+1]);
141			break;
142		}
143
144		off += optlen;
145		len -= optlen;
146	}
147	if (len == 0)
148		return true;
149
150bad:
151	return false;
152}
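/*
 * AH requires that fields which may legitimately change in transit be
 * treated as zero when the ICV is computed (see Appendix A of RFC 2402).
 * For hop-by-hop and destination options, the third-highest-order bit of
 * the option type (0x20) marks option data as mutable, which is why such
 * options are zeroed out above.
 */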
153
154#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
155/**
156 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
157 *	@iph: IPv6 header
 158 *	@destopt: destination options header
159 */
160static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
161{
162	u8 *opt = (u8 *)destopt;
163	int len = ipv6_optlen(destopt);
164	int off = 0;
165	int optlen = 0;
166
167	off += 2;
168	len -= 2;
169
170	while (len > 0) {
171
172		switch (opt[off]) {
173
174		case IPV6_TLV_PAD1:
175			optlen = 1;
176			break;
177		default:
178			if (len < 2)
179				goto bad;
180			optlen = opt[off+1]+2;
181			if (len < optlen)
182				goto bad;
183
184			/* Rearrange the source address in @iph and the
185			 * addresses in home address option for final source.
186			 * See 11.3.2 of RFC 3775 for details.
187			 */
188			if (opt[off] == IPV6_TLV_HAO) {
189				struct in6_addr final_addr;
190				struct ipv6_destopt_hao *hao;
191
192				hao = (struct ipv6_destopt_hao *)&opt[off];
193				if (hao->length != sizeof(hao->addr)) {
194					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
195							     hao->length);
196					goto bad;
197				}
198				final_addr = hao->addr;
199				hao->addr = iph->saddr;
200				iph->saddr = final_addr;
201			}
202			break;
203		}
204
205		off += optlen;
206		len -= optlen;
207	}
208	/* Note: ok if len == 0 */
209bad:
210	return;
211}
212#else
213static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
214#endif
215
216/**
217 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
218 *	@iph: IPv6 header
219 *	@rthdr: routing header
220 *
221 *	Rearrange the destination address in @iph and the addresses in @rthdr
222 *	so that they appear in the order they will at the final destination.
223 *	See Appendix A2 of RFC 2402 for details.
224 */
225static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
226{
227	int segments, segments_left;
228	struct in6_addr *addrs;
229	struct in6_addr final_addr;
230
231	segments_left = rthdr->segments_left;
232	if (segments_left == 0)
233		return;
234	rthdr->segments_left = 0;
235
236	/* The value of rthdr->hdrlen has been verified either by the system
237	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
238	 * packets.  So we can assume that it is even and that segments is
239	 * greater than or equal to segments_left.
240	 *
241	 * For the same reason we can assume that this option is of type 0.
242	 */
243	segments = rthdr->hdrlen >> 1;
244
245	addrs = ((struct rt0_hdr *)rthdr)->addr;
246	final_addr = addrs[segments - 1];
247
248	addrs += segments - segments_left;
249	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
250
251	addrs[0] = iph->daddr;
252	iph->daddr = final_addr;
253}
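/*
 * Worked example of the rearrangement above: for a type 0 routing header
 * with addrs = {A, B, C}, segments_left = 2 and a current daddr of X, the
 * result is daddr = C, addrs = {A, X, B} and segments_left = 0, i.e. the
 * header exactly as the final destination will see it, which is the form
 * the AH ICV must be computed over.
 */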
254
255static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
256{
257	union {
258		struct ipv6hdr *iph;
259		struct ipv6_opt_hdr *opth;
260		struct ipv6_rt_hdr *rth;
261		char *raw;
262	} exthdr = { .iph = iph };
263	char *end = exthdr.raw + len;
264	int nexthdr = iph->nexthdr;
265
266	exthdr.iph++;
267
268	while (exthdr.raw < end) {
269		switch (nexthdr) {
270		case NEXTHDR_DEST:
271			if (dir == XFRM_POLICY_OUT)
272				ipv6_rearrange_destopt(iph, exthdr.opth);
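			/* fall through: destination options are zeroed below as well */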
273		case NEXTHDR_HOP:
274			if (!zero_out_mutable_opts(exthdr.opth)) {
275				LIMIT_NETDEBUG(
276					KERN_WARNING "overrun %sopts\n",
277					nexthdr == NEXTHDR_HOP ?
278						"hop" : "dest");
279				return -EINVAL;
280			}
281			break;
282
283		case NEXTHDR_ROUTING:
284			ipv6_rearrange_rthdr(iph, exthdr.rth);
285			break;
286
 287		default:
288			return 0;
289		}
290
291		nexthdr = exthdr.opth->nexthdr;
292		exthdr.raw += ipv6_optlen(exthdr.opth);
293	}
294
295	return 0;
296}
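/*
 * ipv6_clear_mutable_options() walks the extension header chain within the
 * first @len bytes: destination options are rearranged for the home address
 * option on output and then, like hop-by-hop options, have their mutable
 * options zeroed; routing headers are rearranged; the walk stops at the
 * first header of any other type.  The result is the immutable form of the
 * headers over which the AH ICV is calculated.
 */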
297
298static void ah6_output_done(struct crypto_async_request *base, int err)
299{
300	int extlen;
301	u8 *iph_base;
302	u8 *icv;
303	struct sk_buff *skb = base->data;
304	struct xfrm_state *x = skb_dst(skb)->xfrm;
305	struct ah_data *ahp = x->data;
306	struct ipv6hdr *top_iph = ipv6_hdr(skb);
307	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
308	struct tmp_ext *iph_ext;
309
310	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
311	if (extlen)
312		extlen += sizeof(*iph_ext);
313
314	iph_base = AH_SKB_CB(skb)->tmp;
315	iph_ext = ah_tmp_ext(iph_base);
316	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
317
318	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
319	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
320
321	if (extlen) {
322#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
323		memcpy(&top_iph->saddr, iph_ext, extlen);
324#else
325		memcpy(&top_iph->daddr, iph_ext, extlen);
326#endif
327	}
328
329	kfree(AH_SKB_CB(skb)->tmp);
330	xfrm_output_resume(skb, err);
331}
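/*
 * ah6_output_done() runs when an asynchronous hash started by ah6_output()
 * completes: it copies the computed ICV into the AH header, restores the
 * mutable header fields saved in the scratch buffer, frees that buffer and
 * resumes xfrm output processing.
 */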
332
333static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
334{
335	int err;
336	int nfrags;
337	int extlen;
338	u8 *iph_base;
339	u8 *icv;
340	u8 nexthdr;
341	struct sk_buff *trailer;
342	struct crypto_ahash *ahash;
343	struct ahash_request *req;
344	struct scatterlist *sg;
345	struct ipv6hdr *top_iph;
346	struct ip_auth_hdr *ah;
347	struct ah_data *ahp;
348	struct tmp_ext *iph_ext;
349
350	ahp = x->data;
351	ahash = ahp->ahash;
352
353	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
354		goto out;
355	nfrags = err;
356
357	skb_push(skb, -skb_network_offset(skb));
358	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
359	if (extlen)
360		extlen += sizeof(*iph_ext);
361
362	err = -ENOMEM;
363	iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
364	if (!iph_base)
365		goto out;
366
367	iph_ext = ah_tmp_ext(iph_base);
368	icv = ah_tmp_icv(ahash, iph_ext, extlen);
369	req = ah_tmp_req(ahash, icv);
370	sg = ah_req_sg(ahash, req);
371
372	ah = ip_auth_hdr(skb);
373	memset(ah->auth_data, 0, ahp->icv_trunc_len);
374
375	top_iph = ipv6_hdr(skb);
376	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
377
378	nexthdr = *skb_mac_header(skb);
379	*skb_mac_header(skb) = IPPROTO_AH;
380
381	/* When there are no extension headers, we only need to save the first
382	 * 8 bytes of the base IP header.
383	 */
384	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
385
386	if (extlen) {
387#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
388		memcpy(iph_ext, &top_iph->saddr, extlen);
389#else
390		memcpy(iph_ext, &top_iph->daddr, extlen);
391#endif
392		err = ipv6_clear_mutable_options(top_iph,
393						 extlen - sizeof(*iph_ext) +
394						 sizeof(*top_iph),
395						 XFRM_POLICY_OUT);
396		if (err)
397			goto out_free;
398	}
399
400	ah->nexthdr = nexthdr;
401
402	top_iph->priority    = 0;
403	top_iph->flow_lbl[0] = 0;
404	top_iph->flow_lbl[1] = 0;
405	top_iph->flow_lbl[2] = 0;
406	top_iph->hop_limit   = 0;
407
408	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
409
410	ah->reserved = 0;
411	ah->spi = x->id.spi;
412	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
413
414	sg_init_table(sg, nfrags);
415	skb_to_sgvec(skb, sg, 0, skb->len);
416
417	ahash_request_set_crypt(req, sg, icv, skb->len);
418	ahash_request_set_callback(req, 0, ah6_output_done, skb);
419
420	AH_SKB_CB(skb)->tmp = iph_base;
421
422	err = crypto_ahash_digest(req);
423	if (err) {
424		if (err == -EINPROGRESS)
425			goto out;
426
427		if (err == -EBUSY)
428			err = NET_XMIT_DROP;
429		goto out_free;
430	}
431
432	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
433	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
434
435	if (extlen) {
436#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
437		memcpy(&top_iph->saddr, iph_ext, extlen);
438#else
439		memcpy(&top_iph->daddr, iph_ext, extlen);
440#endif
441	}
442
443out_free:
444	kfree(iph_base);
445out:
446	return err;
447}
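/*
 * Output path in outline: save the first 8 bytes of the IPv6 header (plus
 * the addresses and extension headers, if any) in the scratch buffer, zero
 * the mutable fields (priority, flow label, hop limit) and mutable options,
 * fill in the AH header, hash the whole packet, then restore the saved data
 * and write the truncated ICV into ah->auth_data.  -EINPROGRESS from
 * crypto_ahash_digest() means the hash completes asynchronously and
 * ah6_output_done() finishes the job.
 */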
448
449static void ah6_input_done(struct crypto_async_request *base, int err)
450{
451	u8 *auth_data;
452	u8 *icv;
453	u8 *work_iph;
454	struct sk_buff *skb = base->data;
455	struct xfrm_state *x = xfrm_input_state(skb);
456	struct ah_data *ahp = x->data;
457	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
458	int hdr_len = skb_network_header_len(skb);
459	int ah_hlen = (ah->hdrlen + 2) << 2;
460
461	work_iph = AH_SKB_CB(skb)->tmp;
462	auth_data = ah_tmp_auth(work_iph, hdr_len);
463	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
464
465	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
466	if (err)
467		goto out;
468
469	err = ah->nexthdr;
470
471	skb->network_header += ah_hlen;
472	memcpy(skb_network_header(skb), work_iph, hdr_len);
473	__skb_pull(skb, ah_hlen + hdr_len);
474	skb_set_transport_header(skb, -hdr_len);
475out:
476	kfree(AH_SKB_CB(skb)->tmp);
477	xfrm_input_resume(skb, err);
478}
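/*
 * ah6_input_done() is the asynchronous counterpart of the tail of
 * ah6_input(): it compares the computed ICV with the value saved from the
 * packet and, on a match, restores the saved headers and pulls the AH
 * header out of the skb, then resumes xfrm input with either the inner
 * protocol number or -EBADMSG.
 */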
479
480
481
482static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
483{
 484	/*
 485	 * Before AH processing:
 486	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
 487	 * |<-------------->| hdr_len
 488	 *
 489	 * To erase AH:
 490	 * Keep a copy of the headers that get cleared.  After AH processing,
 491	 * advance skb->network_header by the AH header length, copy the saved
 492	 * hdr_len bytes back, and pull skb->data past both.  If a destination
 493	 * options header follows AH, it ends up right after [Ext2]:
 494	 *
 495	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
 496	 * Afterwards there is an AH-sized gap in front of the IPv6 header.
 497	 */
498
499	u8 *auth_data;
500	u8 *icv;
501	u8 *work_iph;
502	struct sk_buff *trailer;
503	struct crypto_ahash *ahash;
504	struct ahash_request *req;
505	struct scatterlist *sg;
506	struct ip_auth_hdr *ah;
507	struct ipv6hdr *ip6h;
508	struct ah_data *ahp;
509	u16 hdr_len;
510	u16 ah_hlen;
511	int nexthdr;
512	int nfrags;
513	int err = -ENOMEM;
514
515	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
516		goto out;
517
518	/* We are going to _remove_ AH header to keep sockets happy,
519	 * so... Later this can change. */
520	if (skb_cloned(skb) &&
521	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
522		goto out;
523
524	skb->ip_summed = CHECKSUM_NONE;
525
526	hdr_len = skb_network_header_len(skb);
527	ah = (struct ip_auth_hdr *)skb->data;
528	ahp = x->data;
529	ahash = ahp->ahash;
530
531	nexthdr = ah->nexthdr;
532	ah_hlen = (ah->hdrlen + 2) << 2;
533
534	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
535	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
536		goto out;
537
538	if (!pskb_may_pull(skb, ah_hlen))
539		goto out;
540
541
542	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
543		goto out;
544	nfrags = err;
545
546	ah = (struct ip_auth_hdr *)skb->data;
547	ip6h = ipv6_hdr(skb);
548
549	skb_push(skb, hdr_len);
550
551	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
552	if (!work_iph)
553		goto out;
554
555	auth_data = ah_tmp_auth(work_iph, hdr_len);
556	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
557	req = ah_tmp_req(ahash, icv);
558	sg = ah_req_sg(ahash, req);
559
560	memcpy(work_iph, ip6h, hdr_len);
561	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
562	memset(ah->auth_data, 0, ahp->icv_trunc_len);
563
564	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
565		goto out_free;
566
567	ip6h->priority    = 0;
568	ip6h->flow_lbl[0] = 0;
569	ip6h->flow_lbl[1] = 0;
570	ip6h->flow_lbl[2] = 0;
571	ip6h->hop_limit   = 0;
572
573	sg_init_table(sg, nfrags);
574	skb_to_sgvec(skb, sg, 0, skb->len);
575
576	ahash_request_set_crypt(req, sg, icv, skb->len);
577	ahash_request_set_callback(req, 0, ah6_input_done, skb);
578
579	AH_SKB_CB(skb)->tmp = work_iph;
580
581	err = crypto_ahash_digest(req);
582	if (err) {
583		if (err == -EINPROGRESS)
584			goto out;
585
586		goto out_free;
587	}
588
589	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
590	if (err)
591		goto out_free;
592
593	skb->network_header += ah_hlen;
594	memcpy(skb_network_header(skb), work_iph, hdr_len);
595	skb->transport_header = skb->network_header;
596	__skb_pull(skb, ah_hlen + hdr_len);
597
598	err = nexthdr;
599
600out_free:
601	kfree(work_iph);
602out:
603	return err;
604}
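/*
 * Note that the ICV check above is a plain memcmp(); later kernels (see the
 * v6.2 listing below) use crypto_memneq() so the comparison runs in
 * constant time.
 */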
605
606static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
607		    u8 type, u8 code, int offset, __be32 info)
608{
609	struct net *net = dev_net(skb->dev);
610	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
611	struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
612	struct xfrm_state *x;
613
614	if (type != ICMPV6_DEST_UNREACH &&
615	    type != ICMPV6_PKT_TOOBIG)
616		return;
617
618	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
619	if (!x)
620		return;
621
622	NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
623		 ntohl(ah->spi), &iph->daddr);
624
625	xfrm_state_put(x);
626}
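/*
 * ah6_err() handles ICMPv6 errors for AH: this version only looks up the SA
 * by destination address and SPI and logs the PMTU event, whereas the v6.2
 * handler below also reacts to NDISC_REDIRECT and calls ip6_update_pmtu()
 * or ip6_redirect() on the matching state.
 */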
627
628static int ah6_init_state(struct xfrm_state *x)
629{
630	struct ah_data *ahp = NULL;
631	struct xfrm_algo_desc *aalg_desc;
632	struct crypto_ahash *ahash;
633
634	if (!x->aalg)
635		goto error;
636
637	if (x->encap)
638		goto error;
639
640	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
641	if (ahp == NULL)
642		return -ENOMEM;
643
644	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
645	if (IS_ERR(ahash))
646		goto error;
647
648	ahp->ahash = ahash;
649	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
650			       (x->aalg->alg_key_len + 7) / 8))
651		goto error;
652
653	/*
654	 * Lookup the algorithm description maintained by xfrm_algo,
655	 * verify crypto transform properties, and store information
656	 * we need for AH processing.  This lookup cannot fail here
 657 * after a successful crypto_alloc_ahash().
658	 */
659	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
660	BUG_ON(!aalg_desc);
661
662	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
663	    crypto_ahash_digestsize(ahash)) {
664		pr_info("AH: %s digestsize %u != %hu\n",
665			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
666			aalg_desc->uinfo.auth.icv_fullbits/8);
667		goto error;
668	}
669
670	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
671	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
672
673	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
674
675	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
676					  ahp->icv_trunc_len);
677	switch (x->props.mode) {
678	case XFRM_MODE_BEET:
679	case XFRM_MODE_TRANSPORT:
680		break;
681	case XFRM_MODE_TUNNEL:
682		x->props.header_len += sizeof(struct ipv6hdr);
683		break;
684	default:
685		goto error;
686	}
687	x->data = ahp;
688
689	return 0;
690
691error:
692	if (ahp) {
693		crypto_free_ahash(ahp->ahash);
694		kfree(ahp);
695	}
696	return -EINVAL;
697}
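/*
 * ah6_init_state() validates the SA: an authentication algorithm is
 * mandatory, encapsulation is rejected, the ahash transform is allocated
 * and keyed, the digest size is checked against the xfrm_algo description,
 * and x->props.header_len is sized for the AH header (plus an extra IPv6
 * header in tunnel mode).
 */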
698
699static void ah6_destroy(struct xfrm_state *x)
700{
701	struct ah_data *ahp = x->data;
702
703	if (!ahp)
704		return;
705
706	crypto_free_ahash(ahp->ahash);
707	kfree(ahp);
708}
709
710static const struct xfrm_type ah6_type =
711{
712	.description	= "AH6",
713	.owner		= THIS_MODULE,
714	.proto	     	= IPPROTO_AH,
715	.flags		= XFRM_TYPE_REPLAY_PROT,
716	.init_state	= ah6_init_state,
717	.destructor	= ah6_destroy,
718	.input		= ah6_input,
719	.output		= ah6_output,
720	.hdr_offset	= xfrm6_find_1stfragopt,
721};
722
723static const struct inet6_protocol ah6_protocol = {
724	.handler	=	xfrm6_rcv,
725	.err_handler	=	ah6_err,
726	.flags		=	INET6_PROTO_NOPOLICY,
727};
728
729static int __init ah6_init(void)
730{
731	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
732		pr_info("%s: can't add xfrm type\n", __func__);
733		return -EAGAIN;
734	}
735
736	if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
737		pr_info("%s: can't add protocol\n", __func__);
738		xfrm_unregister_type(&ah6_type, AF_INET6);
739		return -EAGAIN;
740	}
741
742	return 0;
743}
744
745static void __exit ah6_fini(void)
746{
747	if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
748		pr_info("%s: can't remove protocol\n", __func__);
749
750	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
751		pr_info("%s: can't remove xfrm type\n", __func__);
752
753}
754
755module_init(ah6_init);
756module_exit(ah6_fini);
757
758MODULE_LICENSE("GPL");
759MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
Linux v6.2 (the same file, for comparison):
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C)2002 USAGI/WIDE Project
  4 *
  5 * Authors
  6 *
  7 *	Mitsuru KANDA @USAGI       : IPv6 Support
  8 *	Kazunori MIYAZAWA @USAGI   :
  9 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 10 *
 11 *	This file is derived from net/ipv4/ah.c.
 12 */
 13
 14#define pr_fmt(fmt) "IPv6: " fmt
 15
 16#include <crypto/algapi.h>
 17#include <crypto/hash.h>
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <net/ip.h>
 21#include <net/ah.h>
 22#include <linux/crypto.h>
 23#include <linux/pfkeyv2.h>
 24#include <linux/string.h>
 25#include <linux/scatterlist.h>
 26#include <net/ip6_route.h>
 27#include <net/icmp.h>
 28#include <net/ipv6.h>
 29#include <net/protocol.h>
 30#include <net/xfrm.h>
 31
 32#define IPV6HDR_BASELEN 8
 33
 34struct tmp_ext {
 35#if IS_ENABLED(CONFIG_IPV6_MIP6)
 36		struct in6_addr saddr;
 37#endif
 38		struct in6_addr daddr;
 39		char hdrs[];
 40};
 41
 42struct ah_skb_cb {
 43	struct xfrm_skb_cb xfrm;
 44	void *tmp;
 45};
 46
 47#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 48
 49static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 50			  unsigned int size)
 51{
 52	unsigned int len;
 53
 54	len = size + crypto_ahash_digestsize(ahash) +
 55	      (crypto_ahash_alignmask(ahash) &
 56	       ~(crypto_tfm_ctx_alignment() - 1));
 57
 58	len = ALIGN(len, crypto_tfm_ctx_alignment());
 59
 60	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 61	len = ALIGN(len, __alignof__(struct scatterlist));
 62
 63	len += sizeof(struct scatterlist) * nfrags;
 64
 65	return kmalloc(len, GFP_ATOMIC);
 66}
 67
 68static inline struct tmp_ext *ah_tmp_ext(void *base)
 69{
 70	return base + IPV6HDR_BASELEN;
 71}
 72
 73static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 74{
 75	return tmp + offset;
 76}
 77
 78static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
 79			     unsigned int offset)
 80{
 81	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
 82}
 83
 84static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 85					       u8 *icv)
 86{
 87	struct ahash_request *req;
 88
 89	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
 90				crypto_tfm_ctx_alignment());
 91
 92	ahash_request_set_tfm(req, ahash);
 93
 94	return req;
 95}
 96
 97static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
 98					     struct ahash_request *req)
 99{
100	return (void *)ALIGN((unsigned long)(req + 1) +
101			     crypto_ahash_reqsize(ahash),
102			     __alignof__(struct scatterlist));
103}
104
105static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
106{
107	u8 *opt = (u8 *)opthdr;
108	int len = ipv6_optlen(opthdr);
109	int off = 0;
110	int optlen = 0;
111
112	off += 2;
113	len -= 2;
114
115	while (len > 0) {
116
117		switch (opt[off]) {
118
119		case IPV6_TLV_PAD1:
120			optlen = 1;
121			break;
122		default:
123			if (len < 2)
124				goto bad;
125			optlen = opt[off+1]+2;
126			if (len < optlen)
127				goto bad;
128			if (opt[off] & 0x20)
129				memset(&opt[off+2], 0, opt[off+1]);
130			break;
131		}
132
133		off += optlen;
134		len -= optlen;
135	}
136	if (len == 0)
137		return true;
138
139bad:
140	return false;
141}
142
143#if IS_ENABLED(CONFIG_IPV6_MIP6)
144/**
145 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
146 *	@iph: IPv6 header
 147 *	@destopt: destination options header
148 */
149static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
150{
151	u8 *opt = (u8 *)destopt;
152	int len = ipv6_optlen(destopt);
153	int off = 0;
154	int optlen = 0;
155
156	off += 2;
157	len -= 2;
158
159	while (len > 0) {
160
161		switch (opt[off]) {
162
163		case IPV6_TLV_PAD1:
164			optlen = 1;
165			break;
166		default:
167			if (len < 2)
168				goto bad;
169			optlen = opt[off+1]+2;
170			if (len < optlen)
171				goto bad;
172
173			/* Rearrange the source address in @iph and the
174			 * addresses in home address option for final source.
175			 * See 11.3.2 of RFC 3775 for details.
176			 */
177			if (opt[off] == IPV6_TLV_HAO) {
178				struct ipv6_destopt_hao *hao;
179
180				hao = (struct ipv6_destopt_hao *)&opt[off];
181				if (hao->length != sizeof(hao->addr)) {
182					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
183							     hao->length);
184					goto bad;
185				}
186				swap(hao->addr, iph->saddr);
187			}
188			break;
189		}
190
191		off += optlen;
192		len -= optlen;
193	}
194	/* Note: ok if len == 0 */
195bad:
196	return;
197}
198#else
199static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
200#endif
201
202/**
203 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
204 *	@iph: IPv6 header
205 *	@rthdr: routing header
206 *
207 *	Rearrange the destination address in @iph and the addresses in @rthdr
208 *	so that they appear in the order they will at the final destination.
209 *	See Appendix A2 of RFC 2402 for details.
210 */
211static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
212{
213	int segments, segments_left;
214	struct in6_addr *addrs;
215	struct in6_addr final_addr;
216
217	segments_left = rthdr->segments_left;
218	if (segments_left == 0)
219		return;
220	rthdr->segments_left = 0;
221
222	/* The value of rthdr->hdrlen has been verified either by the system
223	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
224	 * packets.  So we can assume that it is even and that segments is
225	 * greater than or equal to segments_left.
226	 *
227	 * For the same reason we can assume that this option is of type 0.
228	 */
229	segments = rthdr->hdrlen >> 1;
230
231	addrs = ((struct rt0_hdr *)rthdr)->addr;
232	final_addr = addrs[segments - 1];
233
234	addrs += segments - segments_left;
235	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
236
237	addrs[0] = iph->daddr;
238	iph->daddr = final_addr;
239}
240
241static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
242{
243	union {
244		struct ipv6hdr *iph;
245		struct ipv6_opt_hdr *opth;
246		struct ipv6_rt_hdr *rth;
247		char *raw;
248	} exthdr = { .iph = iph };
249	char *end = exthdr.raw + len;
250	int nexthdr = iph->nexthdr;
251
252	exthdr.iph++;
253
254	while (exthdr.raw < end) {
255		switch (nexthdr) {
256		case NEXTHDR_DEST:
257			if (dir == XFRM_POLICY_OUT)
258				ipv6_rearrange_destopt(iph, exthdr.opth);
259			fallthrough;
260		case NEXTHDR_HOP:
261			if (!zero_out_mutable_opts(exthdr.opth)) {
262				net_dbg_ratelimited("overrun %sopts\n",
263						    nexthdr == NEXTHDR_HOP ?
264						    "hop" : "dest");
265				return -EINVAL;
266			}
267			break;
268
269		case NEXTHDR_ROUTING:
270			ipv6_rearrange_rthdr(iph, exthdr.rth);
271			break;
272
273		default:
274			return 0;
275		}
276
277		nexthdr = exthdr.opth->nexthdr;
278		exthdr.raw += ipv6_optlen(exthdr.opth);
279	}
280
281	return 0;
282}
283
284static void ah6_output_done(struct crypto_async_request *base, int err)
285{
286	int extlen;
287	u8 *iph_base;
288	u8 *icv;
289	struct sk_buff *skb = base->data;
290	struct xfrm_state *x = skb_dst(skb)->xfrm;
291	struct ah_data *ahp = x->data;
292	struct ipv6hdr *top_iph = ipv6_hdr(skb);
293	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
294	struct tmp_ext *iph_ext;
295
296	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
297	if (extlen)
298		extlen += sizeof(*iph_ext);
299
300	iph_base = AH_SKB_CB(skb)->tmp;
301	iph_ext = ah_tmp_ext(iph_base);
302	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
303
304	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
305	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
306
307	if (extlen) {
308#if IS_ENABLED(CONFIG_IPV6_MIP6)
309		memcpy(&top_iph->saddr, iph_ext, extlen);
310#else
311		memcpy(&top_iph->daddr, iph_ext, extlen);
312#endif
313	}
314
315	kfree(AH_SKB_CB(skb)->tmp);
316	xfrm_output_resume(skb->sk, skb, err);
317}
318
319static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
320{
321	int err;
322	int nfrags;
323	int extlen;
324	u8 *iph_base;
325	u8 *icv;
326	u8 nexthdr;
327	struct sk_buff *trailer;
328	struct crypto_ahash *ahash;
329	struct ahash_request *req;
330	struct scatterlist *sg;
331	struct ipv6hdr *top_iph;
332	struct ip_auth_hdr *ah;
333	struct ah_data *ahp;
334	struct tmp_ext *iph_ext;
335	int seqhi_len = 0;
336	__be32 *seqhi;
337	int sglists = 0;
338	struct scatterlist *seqhisg;
339
340	ahp = x->data;
341	ahash = ahp->ahash;
342
343	err = skb_cow_data(skb, 0, &trailer);
344	if (err < 0)
345		goto out;
346	nfrags = err;
347
348	skb_push(skb, -skb_network_offset(skb));
349	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
350	if (extlen)
351		extlen += sizeof(*iph_ext);
352
353	if (x->props.flags & XFRM_STATE_ESN) {
354		sglists = 1;
355		seqhi_len = sizeof(*seqhi);
356	}
357	err = -ENOMEM;
358	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
359				extlen + seqhi_len);
360	if (!iph_base)
361		goto out;
362
363	iph_ext = ah_tmp_ext(iph_base);
364	seqhi = (__be32 *)((char *)iph_ext + extlen);
365	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
366	req = ah_tmp_req(ahash, icv);
367	sg = ah_req_sg(ahash, req);
368	seqhisg = sg + nfrags;
369
370	ah = ip_auth_hdr(skb);
371	memset(ah->auth_data, 0, ahp->icv_trunc_len);
372
373	top_iph = ipv6_hdr(skb);
374	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
375
376	nexthdr = *skb_mac_header(skb);
377	*skb_mac_header(skb) = IPPROTO_AH;
378
379	/* When there are no extension headers, we only need to save the first
380	 * 8 bytes of the base IP header.
381	 */
382	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
383
384	if (extlen) {
385#if IS_ENABLED(CONFIG_IPV6_MIP6)
386		memcpy(iph_ext, &top_iph->saddr, extlen);
387#else
388		memcpy(iph_ext, &top_iph->daddr, extlen);
389#endif
390		err = ipv6_clear_mutable_options(top_iph,
391						 extlen - sizeof(*iph_ext) +
392						 sizeof(*top_iph),
393						 XFRM_POLICY_OUT);
394		if (err)
395			goto out_free;
396	}
397
398	ah->nexthdr = nexthdr;
399
400	top_iph->priority    = 0;
401	top_iph->flow_lbl[0] = 0;
402	top_iph->flow_lbl[1] = 0;
403	top_iph->flow_lbl[2] = 0;
404	top_iph->hop_limit   = 0;
405
406	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
407
408	ah->reserved = 0;
409	ah->spi = x->id.spi;
410	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
411
412	sg_init_table(sg, nfrags + sglists);
413	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
414	if (unlikely(err < 0))
415		goto out_free;
416
417	if (x->props.flags & XFRM_STATE_ESN) {
418		/* Attach seqhi sg right after packet payload */
419		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
420		sg_set_buf(seqhisg, seqhi, seqhi_len);
421	}
422	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
423	ahash_request_set_callback(req, 0, ah6_output_done, skb);
424
425	AH_SKB_CB(skb)->tmp = iph_base;
426
427	err = crypto_ahash_digest(req);
428	if (err) {
429		if (err == -EINPROGRESS)
430			goto out;
431
432		if (err == -ENOSPC)
433			err = NET_XMIT_DROP;
434		goto out_free;
435	}
436
437	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
438	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
439
440	if (extlen) {
441#if IS_ENABLED(CONFIG_IPV6_MIP6)
442		memcpy(&top_iph->saddr, iph_ext, extlen);
443#else
444		memcpy(&top_iph->daddr, iph_ext, extlen);
445#endif
446	}
447
448out_free:
449	kfree(iph_base);
450out:
451	return err;
452}
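/*
 * Compared with the v3.5.6 version above, the output path now supports
 * extended sequence numbers: with XFRM_STATE_ESN the upper 32 bits of the
 * sequence number (seqhi) are attached as an extra scatterlist entry, so
 * they are covered by the ICV without being transmitted in the AH header.
 */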
453
454static void ah6_input_done(struct crypto_async_request *base, int err)
455{
456	u8 *auth_data;
457	u8 *icv;
458	u8 *work_iph;
459	struct sk_buff *skb = base->data;
460	struct xfrm_state *x = xfrm_input_state(skb);
461	struct ah_data *ahp = x->data;
462	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
463	int hdr_len = skb_network_header_len(skb);
464	int ah_hlen = ipv6_authlen(ah);
465
466	if (err)
467		goto out;
468
469	work_iph = AH_SKB_CB(skb)->tmp;
470	auth_data = ah_tmp_auth(work_iph, hdr_len);
471	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
472
473	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
474	if (err)
475		goto out;
476
477	err = ah->nexthdr;
478
479	skb->network_header += ah_hlen;
480	memcpy(skb_network_header(skb), work_iph, hdr_len);
481	__skb_pull(skb, ah_hlen + hdr_len);
482	if (x->props.mode == XFRM_MODE_TUNNEL)
483		skb_reset_transport_header(skb);
484	else
485		skb_set_transport_header(skb, -hdr_len);
486out:
487	kfree(AH_SKB_CB(skb)->tmp);
488	xfrm_input_resume(skb, err);
489}
490
491
492
493static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
494{
 495	/*
 496	 * Before AH processing:
 497	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
 498	 * |<-------------->| hdr_len
 499	 *
 500	 * To erase AH:
 501	 * Keep a copy of the headers that get cleared.  After AH processing,
 502	 * advance skb->network_header by the AH header length, copy the saved
 503	 * hdr_len bytes back, and pull skb->data past both.  If a destination
 504	 * options header follows AH, it ends up right after [Ext2]:
 505	 *
 506	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
 507	 * Afterwards there is an AH-sized gap in front of the IPv6 header.
 508	 */
509
510	u8 *auth_data;
511	u8 *icv;
512	u8 *work_iph;
513	struct sk_buff *trailer;
514	struct crypto_ahash *ahash;
515	struct ahash_request *req;
516	struct scatterlist *sg;
517	struct ip_auth_hdr *ah;
518	struct ipv6hdr *ip6h;
519	struct ah_data *ahp;
520	u16 hdr_len;
521	u16 ah_hlen;
522	int nexthdr;
523	int nfrags;
524	int err = -ENOMEM;
525	int seqhi_len = 0;
526	__be32 *seqhi;
527	int sglists = 0;
528	struct scatterlist *seqhisg;
529
530	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
531		goto out;
532
533	/* We are going to _remove_ AH header to keep sockets happy,
534	 * so... Later this can change. */
535	if (skb_unclone(skb, GFP_ATOMIC))
536		goto out;
537
538	skb->ip_summed = CHECKSUM_NONE;
539
540	hdr_len = skb_network_header_len(skb);
541	ah = (struct ip_auth_hdr *)skb->data;
542	ahp = x->data;
543	ahash = ahp->ahash;
544
545	nexthdr = ah->nexthdr;
546	ah_hlen = ipv6_authlen(ah);
547
548	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
549	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
550		goto out;
551
552	if (!pskb_may_pull(skb, ah_hlen))
553		goto out;
554
555	err = skb_cow_data(skb, 0, &trailer);
556	if (err < 0)
557		goto out;
558	nfrags = err;
559
560	ah = (struct ip_auth_hdr *)skb->data;
561	ip6h = ipv6_hdr(skb);
562
563	skb_push(skb, hdr_len);
564
565	if (x->props.flags & XFRM_STATE_ESN) {
566		sglists = 1;
567		seqhi_len = sizeof(*seqhi);
568	}
569
570	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
571				ahp->icv_trunc_len + seqhi_len);
572	if (!work_iph) {
573		err = -ENOMEM;
574		goto out;
575	}
576
577	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
578	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
579	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
580	req = ah_tmp_req(ahash, icv);
581	sg = ah_req_sg(ahash, req);
582	seqhisg = sg + nfrags;
583
584	memcpy(work_iph, ip6h, hdr_len);
585	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
586	memset(ah->auth_data, 0, ahp->icv_trunc_len);
587
588	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
589	if (err)
590		goto out_free;
591
592	ip6h->priority    = 0;
593	ip6h->flow_lbl[0] = 0;
594	ip6h->flow_lbl[1] = 0;
595	ip6h->flow_lbl[2] = 0;
596	ip6h->hop_limit   = 0;
597
598	sg_init_table(sg, nfrags + sglists);
599	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
600	if (unlikely(err < 0))
601		goto out_free;
602
603	if (x->props.flags & XFRM_STATE_ESN) {
604		/* Attach seqhi sg right after packet payload */
605		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
606		sg_set_buf(seqhisg, seqhi, seqhi_len);
607	}
608
609	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
610	ahash_request_set_callback(req, 0, ah6_input_done, skb);
611
612	AH_SKB_CB(skb)->tmp = work_iph;
613
614	err = crypto_ahash_digest(req);
615	if (err) {
616		if (err == -EINPROGRESS)
617			goto out;
618
619		goto out_free;
620	}
621
622	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
623	if (err)
624		goto out_free;
625
626	skb->network_header += ah_hlen;
627	memcpy(skb_network_header(skb), work_iph, hdr_len);
628	__skb_pull(skb, ah_hlen + hdr_len);
629
630	if (x->props.mode == XFRM_MODE_TUNNEL)
631		skb_reset_transport_header(skb);
632	else
633		skb_set_transport_header(skb, -hdr_len);
634
635	err = nexthdr;
636
637out_free:
638	kfree(work_iph);
639out:
640	return err;
641}
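/*
 * The input path mirrors the ESN handling in ah6_output(): the receive
 * side's seq.input.hi value is hashed along with the packet, and the ICV
 * comparison uses crypto_memneq() instead of memcmp().
 */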
642
643static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
644		   u8 type, u8 code, int offset, __be32 info)
645{
646	struct net *net = dev_net(skb->dev);
647	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
648	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
649	struct xfrm_state *x;
650
651	if (type != ICMPV6_PKT_TOOBIG &&
652	    type != NDISC_REDIRECT)
653		return 0;
654
655	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
656	if (!x)
657		return 0;
658
659	if (type == NDISC_REDIRECT)
660		ip6_redirect(skb, net, skb->dev->ifindex, 0,
661			     sock_net_uid(net, NULL));
662	else
663		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
664	xfrm_state_put(x);
665
666	return 0;
667}
668
669static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
670{
671	struct ah_data *ahp = NULL;
672	struct xfrm_algo_desc *aalg_desc;
673	struct crypto_ahash *ahash;
674
675	if (!x->aalg) {
676		NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
677		goto error;
678	}
679
680	if (x->encap) {
681		NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
682		goto error;
683	}
684
685	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
686	if (!ahp)
687		return -ENOMEM;
688
689	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
690	if (IS_ERR(ahash)) {
691		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
692		goto error;
693	}
694
695	ahp->ahash = ahash;
696	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
697			       (x->aalg->alg_key_len + 7) / 8)) {
698		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
699		goto error;
700	}
701
702	/*
703	 * Lookup the algorithm description maintained by xfrm_algo,
704	 * verify crypto transform properties, and store information
705	 * we need for AH processing.  This lookup cannot fail here
 706 * after a successful crypto_alloc_ahash().
707	 */
708	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
709	BUG_ON(!aalg_desc);
710
711	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
712	    crypto_ahash_digestsize(ahash)) {
713		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
714		goto error;
715	}
716
717	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
718	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
719
720	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
721					  ahp->icv_trunc_len);
722	switch (x->props.mode) {
723	case XFRM_MODE_BEET:
724	case XFRM_MODE_TRANSPORT:
725		break;
726	case XFRM_MODE_TUNNEL:
727		x->props.header_len += sizeof(struct ipv6hdr);
728		break;
729	default:
730		NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
731		goto error;
732	}
733	x->data = ahp;
734
735	return 0;
736
737error:
738	if (ahp) {
739		crypto_free_ahash(ahp->ahash);
740		kfree(ahp);
741	}
742	return -EINVAL;
743}
744
745static void ah6_destroy(struct xfrm_state *x)
746{
747	struct ah_data *ahp = x->data;
748
749	if (!ahp)
750		return;
751
752	crypto_free_ahash(ahp->ahash);
753	kfree(ahp);
754}
755
756static int ah6_rcv_cb(struct sk_buff *skb, int err)
757{
758	return 0;
759}
760
761static const struct xfrm_type ah6_type = {
762	.owner		= THIS_MODULE,
763	.proto		= IPPROTO_AH,
764	.flags		= XFRM_TYPE_REPLAY_PROT,
765	.init_state	= ah6_init_state,
766	.destructor	= ah6_destroy,
767	.input		= ah6_input,
768	.output		= ah6_output,
769};
770
771static struct xfrm6_protocol ah6_protocol = {
772	.handler	=	xfrm6_rcv,
773	.input_handler	=	xfrm_input,
774	.cb_handler	=	ah6_rcv_cb,
775	.err_handler	=	ah6_err,
776	.priority	=	0,
777};
778
779static int __init ah6_init(void)
780{
781	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
782		pr_info("%s: can't add xfrm type\n", __func__);
783		return -EAGAIN;
784	}
785
786	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
787		pr_info("%s: can't add protocol\n", __func__);
788		xfrm_unregister_type(&ah6_type, AF_INET6);
789		return -EAGAIN;
790	}
791
792	return 0;
793}
794
795static void __exit ah6_fini(void)
796{
797	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
798		pr_info("%s: can't remove protocol\n", __func__);
799
800	xfrm_unregister_type(&ah6_type, AF_INET6);
801}
802
803module_init(ah6_init);
804module_exit(ah6_fini);
805
806MODULE_LICENSE("GPL");
807MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);