v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C)2002 USAGI/WIDE Project
  4 *
  5 * Authors
  6 *
  7 *	Mitsuru KANDA @USAGI       : IPv6 Support
  8 *	Kazunori MIYAZAWA @USAGI   :
  9 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 10 *
 11 *	This file is derived from net/ipv4/ah.c.
 12 */
 13
 14#define pr_fmt(fmt) "IPv6: " fmt
  15
  16#include <crypto/hash.h>
 17#include <crypto/utils.h>
 18#include <linux/module.h>
 19#include <linux/slab.h>
 20#include <net/ip.h>
 21#include <net/ah.h>
 22#include <linux/crypto.h>
 23#include <linux/pfkeyv2.h>
 24#include <linux/string.h>
 25#include <linux/scatterlist.h>
 26#include <net/ip6_route.h>
 27#include <net/icmp.h>
 28#include <net/ipv6.h>
 29#include <net/protocol.h>
 30#include <net/xfrm.h>
 31
 32#define IPV6HDR_BASELEN 8
 33
 34struct tmp_ext {
 35#if IS_ENABLED(CONFIG_IPV6_MIP6)
 36		struct in6_addr saddr;
 37#endif
 38		struct in6_addr daddr;
 39		char hdrs[];
 40};
 41
 42struct ah_skb_cb {
 43	struct xfrm_skb_cb xfrm;
 44	void *tmp;
 45};
 46
 47#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 48
 49static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 50			  unsigned int size)
 51{
 52	unsigned int len;
 53
  54	len = size + crypto_ahash_digestsize(ahash);
  55
 56	len = ALIGN(len, crypto_tfm_ctx_alignment());
 57
 58	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 59	len = ALIGN(len, __alignof__(struct scatterlist));
 60
 61	len += sizeof(struct scatterlist) * nfrags;
 62
 63	return kmalloc(len, GFP_ATOMIC);
 64}
 65
 66static inline struct tmp_ext *ah_tmp_ext(void *base)
 67{
 68	return base + IPV6HDR_BASELEN;
 69}
 70
 71static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 72{
 73	return tmp + offset;
 74}
 75
  76static inline u8 *ah_tmp_icv(void *tmp, unsigned int offset)
  77{
 78	return tmp + offset;
 79}
 80
 81static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 82					       u8 *icv)
 83{
 84	struct ahash_request *req;
 85
 86	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
 87				crypto_tfm_ctx_alignment());
 88
 89	ahash_request_set_tfm(req, ahash);
 90
 91	return req;
 92}
 93
 94static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
 95					     struct ahash_request *req)
 96{
 97	return (void *)ALIGN((unsigned long)(req + 1) +
 98			     crypto_ahash_reqsize(ahash),
 99			     __alignof__(struct scatterlist));
100}
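
The helpers above all carve regions out of the single scratch buffer returned by ah_alloc_tmp(). A minimal sketch of that layout, written against the carving done by ah6_output() and ah6_input() below (the bracketed names are the helpers that return each region):

/*
 *  base
 *   |  caller data ("size" bytes): saved IPv6 header, extension
 *   |  headers and, with ESN, the high-order sequence number     [ah_tmp_ext / ah_tmp_auth]
 *   |  ICV output, crypto_ahash_digestsize(ahash) bytes          [ah_tmp_icv]
 *   |  ...padding up to crypto_tfm_ctx_alignment()...
 *   |  struct ahash_request + crypto_ahash_reqsize(ahash)        [ah_tmp_req]
 *   |  ...padding up to __alignof__(struct scatterlist)...
 *   |  struct scatterlist[nfrags]                                [ah_req_sg]
 */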
101
102static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
103{
104	u8 *opt = (u8 *)opthdr;
105	int len = ipv6_optlen(opthdr);
106	int off = 0;
107	int optlen = 0;
108
109	off += 2;
110	len -= 2;
111
112	while (len > 0) {
113
114		switch (opt[off]) {
115
116		case IPV6_TLV_PAD1:
117			optlen = 1;
118			break;
119		default:
120			if (len < 2)
121				goto bad;
122			optlen = opt[off+1]+2;
123			if (len < optlen)
124				goto bad;
125			if (opt[off] & 0x20)
126				memset(&opt[off+2], 0, opt[off+1]);
127			break;
128		}
129
130		off += optlen;
131		len -= optlen;
132	}
133	if (len == 0)
134		return true;
135
136bad:
137	return false;
138}
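
The opt[off] & 0x20 test above checks the "option data may change en route" flag of the IPv6 option type encoding (RFC 8200, section 4.2); AH treats such options as zero when computing the ICV (RFC 4302). A hypothetical helper, just to spell the bit test out:

static inline bool ipv6_opt_is_mutable(u8 opt_type)
{
	/* Third-highest-order bit of the option type: the option
	 * data may change in transit, so it is zeroed for the ICV. */
	return opt_type & 0x20;
}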
139
140#if IS_ENABLED(CONFIG_IPV6_MIP6)
141/**
142 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
143 *	@iph: IPv6 header
 144 *	@destopt: destination options header
145 */
146static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
147{
148	u8 *opt = (u8 *)destopt;
149	int len = ipv6_optlen(destopt);
150	int off = 0;
151	int optlen = 0;
152
153	off += 2;
154	len -= 2;
155
156	while (len > 0) {
157
158		switch (opt[off]) {
159
160		case IPV6_TLV_PAD1:
161			optlen = 1;
162			break;
163		default:
164			if (len < 2)
165				goto bad;
166			optlen = opt[off+1]+2;
167			if (len < optlen)
168				goto bad;
169
170			/* Rearrange the source address in @iph and the
171			 * addresses in home address option for final source.
172			 * See 11.3.2 of RFC 3775 for details.
173			 */
 174			if (opt[off] == IPV6_TLV_HAO) {
 175				struct ipv6_destopt_hao *hao;
176
177				hao = (struct ipv6_destopt_hao *)&opt[off];
178				if (hao->length != sizeof(hao->addr)) {
179					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
180							     hao->length);
181					goto bad;
182				}
 183				swap(hao->addr, iph->saddr);
 184			}
185			break;
186		}
187
188		off += optlen;
189		len -= optlen;
190	}
191	/* Note: ok if len == 0 */
192bad:
193	return;
194}
195#else
196static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
197#endif
198
199/**
200 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
201 *	@iph: IPv6 header
202 *	@rthdr: routing header
203 *
204 *	Rearrange the destination address in @iph and the addresses in @rthdr
205 *	so that they appear in the order they will at the final destination.
206 *	See Appendix A2 of RFC 2402 for details.
207 */
208static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
209{
210	int segments, segments_left;
211	struct in6_addr *addrs;
212	struct in6_addr final_addr;
213
214	segments_left = rthdr->segments_left;
215	if (segments_left == 0)
216		return;
217	rthdr->segments_left = 0;
218
219	/* The value of rthdr->hdrlen has been verified either by the system
220	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
221	 * packets.  So we can assume that it is even and that segments is
222	 * greater than or equal to segments_left.
223	 *
224	 * For the same reason we can assume that this option is of type 0.
225	 */
226	segments = rthdr->hdrlen >> 1;
227
228	addrs = ((struct rt0_hdr *)rthdr)->addr;
229	final_addr = addrs[segments - 1];
230
231	addrs += segments - segments_left;
232	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
233
234	addrs[0] = iph->daddr;
235	iph->daddr = final_addr;
236}
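
A worked example of the rearrangement above, assuming a type 0 routing header that still has two segments to visit:

/*
 * Before:  iph->daddr = D (next hop), segments_left = 2,
 *          addrs[] = { A1, A2 }          (A2 is the final destination)
 *
 *          final_addr = addrs[1] = A2
 *          memmove() shifts the pending addresses up:   { A1, A1 }
 *          addrs[0] = iph->daddr                    ->  { D,  A1 }
 *          iph->daddr = A2, segments_left = 0
 *
 * After:   the header reads exactly as it will on arrival at A2,
 *          which is the form the AH ICV has to be computed over.
 */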
237
238static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
239{
240	union {
241		struct ipv6hdr *iph;
242		struct ipv6_opt_hdr *opth;
243		struct ipv6_rt_hdr *rth;
244		char *raw;
245	} exthdr = { .iph = iph };
246	char *end = exthdr.raw + len;
247	int nexthdr = iph->nexthdr;
248
249	exthdr.iph++;
250
251	while (exthdr.raw < end) {
252		switch (nexthdr) {
253		case NEXTHDR_DEST:
254			if (dir == XFRM_POLICY_OUT)
255				ipv6_rearrange_destopt(iph, exthdr.opth);
256			fallthrough;
257		case NEXTHDR_HOP:
258			if (!zero_out_mutable_opts(exthdr.opth)) {
259				net_dbg_ratelimited("overrun %sopts\n",
260						    nexthdr == NEXTHDR_HOP ?
261						    "hop" : "dest");
262				return -EINVAL;
263			}
264			break;
265
266		case NEXTHDR_ROUTING:
267			ipv6_rearrange_rthdr(iph, exthdr.rth);
268			break;
269
270		default:
271			return 0;
272		}
273
274		nexthdr = exthdr.opth->nexthdr;
275		exthdr.raw += ipv6_optlen(exthdr.opth);
276	}
277
278	return 0;
279}
280
281static void ah6_output_done(void *data, int err)
282{
283	int extlen;
284	u8 *iph_base;
285	u8 *icv;
286	struct sk_buff *skb = data;
287	struct xfrm_state *x = skb_dst(skb)->xfrm;
288	struct ah_data *ahp = x->data;
289	struct ipv6hdr *top_iph = ipv6_hdr(skb);
290	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
291	struct tmp_ext *iph_ext;
292
293	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
294	if (extlen)
295		extlen += sizeof(*iph_ext);
296
297	iph_base = AH_SKB_CB(skb)->tmp;
298	iph_ext = ah_tmp_ext(iph_base);
299	icv = ah_tmp_icv(iph_ext, extlen);
300
301	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
302	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
303
304	if (extlen) {
305#if IS_ENABLED(CONFIG_IPV6_MIP6)
306		memcpy(&top_iph->saddr, iph_ext, extlen);
307#else
308		memcpy(&top_iph->daddr, iph_ext, extlen);
309#endif
310	}
311
312	kfree(AH_SKB_CB(skb)->tmp);
313	xfrm_output_resume(skb->sk, skb, err);
314}
315
316static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
317{
318	int err;
319	int nfrags;
320	int extlen;
321	u8 *iph_base;
322	u8 *icv;
323	u8 nexthdr;
324	struct sk_buff *trailer;
325	struct crypto_ahash *ahash;
326	struct ahash_request *req;
327	struct scatterlist *sg;
328	struct ipv6hdr *top_iph;
329	struct ip_auth_hdr *ah;
330	struct ah_data *ahp;
331	struct tmp_ext *iph_ext;
332	int seqhi_len = 0;
333	__be32 *seqhi;
334	int sglists = 0;
335	struct scatterlist *seqhisg;
336
337	ahp = x->data;
338	ahash = ahp->ahash;
339
340	err = skb_cow_data(skb, 0, &trailer);
341	if (err < 0)
342		goto out;
343	nfrags = err;
344
345	skb_push(skb, -skb_network_offset(skb));
346	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
347	if (extlen)
348		extlen += sizeof(*iph_ext);
349
350	if (x->props.flags & XFRM_STATE_ESN) {
351		sglists = 1;
352		seqhi_len = sizeof(*seqhi);
353	}
354	err = -ENOMEM;
355	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
356				extlen + seqhi_len);
357	if (!iph_base)
358		goto out;
359
360	iph_ext = ah_tmp_ext(iph_base);
361	seqhi = (__be32 *)((char *)iph_ext + extlen);
362	icv = ah_tmp_icv(seqhi, seqhi_len);
363	req = ah_tmp_req(ahash, icv);
364	sg = ah_req_sg(ahash, req);
365	seqhisg = sg + nfrags;
366
367	ah = ip_auth_hdr(skb);
368	memset(ah->auth_data, 0, ahp->icv_trunc_len);
369
370	top_iph = ipv6_hdr(skb);
371	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
372
373	nexthdr = *skb_mac_header(skb);
374	*skb_mac_header(skb) = IPPROTO_AH;
375
376	/* When there are no extension headers, we only need to save the first
377	 * 8 bytes of the base IP header.
378	 */
379	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
380
381	if (extlen) {
382#if IS_ENABLED(CONFIG_IPV6_MIP6)
383		memcpy(iph_ext, &top_iph->saddr, extlen);
384#else
385		memcpy(iph_ext, &top_iph->daddr, extlen);
386#endif
387		err = ipv6_clear_mutable_options(top_iph,
388						 extlen - sizeof(*iph_ext) +
389						 sizeof(*top_iph),
390						 XFRM_POLICY_OUT);
391		if (err)
392			goto out_free;
393	}
394
395	ah->nexthdr = nexthdr;
396
397	top_iph->priority    = 0;
398	top_iph->flow_lbl[0] = 0;
399	top_iph->flow_lbl[1] = 0;
400	top_iph->flow_lbl[2] = 0;
401	top_iph->hop_limit   = 0;
402
403	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
404
405	ah->reserved = 0;
406	ah->spi = x->id.spi;
407	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
408
409	sg_init_table(sg, nfrags + sglists);
410	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
411	if (unlikely(err < 0))
412		goto out_free;
413
414	if (x->props.flags & XFRM_STATE_ESN) {
415		/* Attach seqhi sg right after packet payload */
416		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
417		sg_set_buf(seqhisg, seqhi, seqhi_len);
418	}
419	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
420	ahash_request_set_callback(req, 0, ah6_output_done, skb);
421
422	AH_SKB_CB(skb)->tmp = iph_base;
423
424	err = crypto_ahash_digest(req);
425	if (err) {
426		if (err == -EINPROGRESS)
427			goto out;
428
429		if (err == -ENOSPC)
430			err = NET_XMIT_DROP;
431		goto out_free;
432	}
433
434	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
435	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
436
437	if (extlen) {
438#if IS_ENABLED(CONFIG_IPV6_MIP6)
439		memcpy(&top_iph->saddr, iph_ext, extlen);
440#else
441		memcpy(&top_iph->daddr, iph_ext, extlen);
442#endif
443	}
444
445out_free:
446	kfree(iph_base);
447out:
448	return err;
449}
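
A worked example of the ah->hdrlen computation in ah6_output(), assuming hmac(sha1) with the usual 96-bit truncated ICV (icv_trunc_len = 12):

/*
 * sizeof(struct ip_auth_hdr) = 12 (fixed part, without auth_data)
 * XFRM_ALIGN8(12 + 12) = 24,  (24 >> 2) - 2 = 4
 *
 * i.e. the AH "Payload Len" field: the header length in 32-bit
 * words minus 2, as RFC 4302 specifies (24 bytes = 6 words, 6 - 2 = 4).
 */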
450
451static void ah6_input_done(void *data, int err)
452{
453	u8 *auth_data;
454	u8 *icv;
455	u8 *work_iph;
456	struct sk_buff *skb = data;
457	struct xfrm_state *x = xfrm_input_state(skb);
458	struct ah_data *ahp = x->data;
459	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
460	int hdr_len = skb_network_header_len(skb);
461	int ah_hlen = ipv6_authlen(ah);
462
463	if (err)
464		goto out;
465
466	work_iph = AH_SKB_CB(skb)->tmp;
467	auth_data = ah_tmp_auth(work_iph, hdr_len);
468	icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);
469
470	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
471	if (err)
472		goto out;
473
474	err = ah->nexthdr;
475
476	skb->network_header += ah_hlen;
477	memcpy(skb_network_header(skb), work_iph, hdr_len);
478	__skb_pull(skb, ah_hlen + hdr_len);
479	if (x->props.mode == XFRM_MODE_TUNNEL)
480		skb_reset_transport_header(skb);
481	else
482		skb_set_transport_header(skb, -hdr_len);
483out:
484	kfree(AH_SKB_CB(skb)->tmp);
485	xfrm_input_resume(skb, err);
486}
487
488
489
490static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
491{
492	/*
493	 * Before process AH
494	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
495	 * |<-------------->| hdr_len
496	 *
497	 * To erase AH:
498	 * Keeping copy of cleared headers. After AH processing,
499	 * Moving the pointer of skb->network_header by using skb_pull as long
500	 * as AH header length. Then copy back the copy as long as hdr_len
501	 * If destination header following AH exists, copy it into after [Ext2].
502	 *
503	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
504	 * There is offset of AH before IPv6 header after the process.
505	 */
506
507	u8 *auth_data;
508	u8 *icv;
509	u8 *work_iph;
510	struct sk_buff *trailer;
511	struct crypto_ahash *ahash;
512	struct ahash_request *req;
513	struct scatterlist *sg;
514	struct ip_auth_hdr *ah;
515	struct ipv6hdr *ip6h;
516	struct ah_data *ahp;
517	u16 hdr_len;
518	u16 ah_hlen;
519	int nexthdr;
520	int nfrags;
521	int err = -ENOMEM;
522	int seqhi_len = 0;
523	__be32 *seqhi;
524	int sglists = 0;
525	struct scatterlist *seqhisg;
526
527	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
528		goto out;
529
530	/* We are going to _remove_ AH header to keep sockets happy,
531	 * so... Later this can change. */
532	if (skb_unclone(skb, GFP_ATOMIC))
533		goto out;
534
535	skb->ip_summed = CHECKSUM_NONE;
536
537	hdr_len = skb_network_header_len(skb);
538	ah = (struct ip_auth_hdr *)skb->data;
539	ahp = x->data;
540	ahash = ahp->ahash;
541
542	nexthdr = ah->nexthdr;
543	ah_hlen = ipv6_authlen(ah);
544
545	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
546	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
547		goto out;
548
549	if (!pskb_may_pull(skb, ah_hlen))
550		goto out;
551
552	err = skb_cow_data(skb, 0, &trailer);
553	if (err < 0)
554		goto out;
555	nfrags = err;
556
557	ah = (struct ip_auth_hdr *)skb->data;
558	ip6h = ipv6_hdr(skb);
559
560	skb_push(skb, hdr_len);
561
562	if (x->props.flags & XFRM_STATE_ESN) {
563		sglists = 1;
564		seqhi_len = sizeof(*seqhi);
565	}
566
567	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
568				ahp->icv_trunc_len + seqhi_len);
569	if (!work_iph) {
570		err = -ENOMEM;
571		goto out;
572	}
573
574	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
575	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
576	icv = ah_tmp_icv(seqhi, seqhi_len);
577	req = ah_tmp_req(ahash, icv);
578	sg = ah_req_sg(ahash, req);
579	seqhisg = sg + nfrags;
580
581	memcpy(work_iph, ip6h, hdr_len);
582	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
583	memset(ah->auth_data, 0, ahp->icv_trunc_len);
584
585	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
586	if (err)
587		goto out_free;
588
589	ip6h->priority    = 0;
590	ip6h->flow_lbl[0] = 0;
591	ip6h->flow_lbl[1] = 0;
592	ip6h->flow_lbl[2] = 0;
593	ip6h->hop_limit   = 0;
594
595	sg_init_table(sg, nfrags + sglists);
596	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
597	if (unlikely(err < 0))
598		goto out_free;
599
600	if (x->props.flags & XFRM_STATE_ESN) {
601		/* Attach seqhi sg right after packet payload */
602		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
603		sg_set_buf(seqhisg, seqhi, seqhi_len);
604	}
605
606	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
607	ahash_request_set_callback(req, 0, ah6_input_done, skb);
608
609	AH_SKB_CB(skb)->tmp = work_iph;
610
611	err = crypto_ahash_digest(req);
612	if (err) {
613		if (err == -EINPROGRESS)
614			goto out;
615
616		goto out_free;
617	}
618
619	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
620	if (err)
621		goto out_free;
622
623	skb->network_header += ah_hlen;
624	memcpy(skb_network_header(skb), work_iph, hdr_len);
625	__skb_pull(skb, ah_hlen + hdr_len);
626
627	if (x->props.mode == XFRM_MODE_TUNNEL)
628		skb_reset_transport_header(skb);
629	else
630		skb_set_transport_header(skb, -hdr_len);
631
632	err = nexthdr;
633
634out_free:
635	kfree(work_iph);
636out:
637	return err;
638}
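
When extended sequence numbers are enabled, both ah6_output() and ah6_input() hang the high-order 32 bits of the sequence number off the scatterlist right after the packet data, so the digest covers them even though they are never sent on the wire (RFC 4302's extended sequence number scheme). A comment-only sketch of what is actually hashed:

/*
 * ICV = HASH( packet as seen by AH [skb->len bytes] || seq_hi [4 bytes] )
 *
 * seq_hi is reconstructed by the receiver from its replay state
 * (XFRM_SKB_CB(skb)->seq.input.hi above), so both ends hash the
 * same implicit value.
 */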
639
640static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
641		   u8 type, u8 code, int offset, __be32 info)
642{
643	struct net *net = dev_net(skb->dev);
644	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
645	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
646	struct xfrm_state *x;
647
648	if (type != ICMPV6_PKT_TOOBIG &&
649	    type != NDISC_REDIRECT)
650		return 0;
651
652	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
653	if (!x)
654		return 0;
655
656	if (type == NDISC_REDIRECT)
657		ip6_redirect(skb, net, skb->dev->ifindex, 0,
658			     sock_net_uid(net, NULL));
659	else
660		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
661	xfrm_state_put(x);
662
663	return 0;
664}
665
666static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
667{
668	struct ah_data *ahp = NULL;
669	struct xfrm_algo_desc *aalg_desc;
670	struct crypto_ahash *ahash;
671
672	if (!x->aalg) {
673		NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
674		goto error;
675	}
676
677	if (x->encap) {
678		NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
679		goto error;
680	}
681
682	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
683	if (!ahp)
684		return -ENOMEM;
685
686	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
687	if (IS_ERR(ahash)) {
688		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
689		goto error;
690	}
691
692	ahp->ahash = ahash;
693	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
694			       (x->aalg->alg_key_len + 7) / 8)) {
695		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
696		goto error;
697	}
698
699	/*
700	 * Lookup the algorithm description maintained by xfrm_algo,
701	 * verify crypto transform properties, and store information
702	 * we need for AH processing.  This lookup cannot fail here
703	 * after a successful crypto_alloc_hash().
704	 */
705	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
706	BUG_ON(!aalg_desc);
707
708	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
709	    crypto_ahash_digestsize(ahash)) {
 710		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
 711		goto error;
712	}
713
714	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
715	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
716
717	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
718					  ahp->icv_trunc_len);
719	switch (x->props.mode) {
720	case XFRM_MODE_BEET:
721	case XFRM_MODE_TRANSPORT:
722		break;
723	case XFRM_MODE_TUNNEL:
724		x->props.header_len += sizeof(struct ipv6hdr);
725		break;
726	default:
727		NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
728		goto error;
729	}
730	x->data = ahp;
731
732	return 0;
733
734error:
735	if (ahp) {
736		crypto_free_ahash(ahp->ahash);
737		kfree(ahp);
738	}
739	return -EINVAL;
740}
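
For concreteness, with the common AH algorithm hmac(sha1) (HMAC-SHA-1-96, RFC 2404) the values set up above work out as follows, as a sketch:

/*
 * aalg_desc->uinfo.auth.icv_fullbits = 160  ->  icv_full_len  = 20
 * x->aalg->alg_trunc_len             =  96  ->  icv_trunc_len = 12
 *
 * x->props.header_len = XFRM_ALIGN8(12 + 12) = 24 bytes in
 * transport/BEET mode, plus sizeof(struct ipv6hdr) = 40 in tunnel mode.
 */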
741
742static void ah6_destroy(struct xfrm_state *x)
743{
744	struct ah_data *ahp = x->data;
745
746	if (!ahp)
747		return;
748
749	crypto_free_ahash(ahp->ahash);
750	kfree(ahp);
751}
752
753static int ah6_rcv_cb(struct sk_buff *skb, int err)
754{
755	return 0;
756}
757
 758static const struct xfrm_type ah6_type = {
 759	.owner		= THIS_MODULE,
760	.proto		= IPPROTO_AH,
761	.flags		= XFRM_TYPE_REPLAY_PROT,
762	.init_state	= ah6_init_state,
763	.destructor	= ah6_destroy,
764	.input		= ah6_input,
 765	.output		= ah6_output,
 766};
767
768static struct xfrm6_protocol ah6_protocol = {
769	.handler	=	xfrm6_rcv,
770	.input_handler	=	xfrm_input,
771	.cb_handler	=	ah6_rcv_cb,
772	.err_handler	=	ah6_err,
773	.priority	=	0,
774};
775
776static int __init ah6_init(void)
777{
778	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
779		pr_info("%s: can't add xfrm type\n", __func__);
780		return -EAGAIN;
781	}
782
783	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
784		pr_info("%s: can't add protocol\n", __func__);
785		xfrm_unregister_type(&ah6_type, AF_INET6);
786		return -EAGAIN;
787	}
788
789	return 0;
790}
791
792static void __exit ah6_fini(void)
793{
794	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
795		pr_info("%s: can't remove protocol\n", __func__);
796
 797	xfrm_unregister_type(&ah6_type, AF_INET6);
 798}
799
800module_init(ah6_init);
801module_exit(ah6_fini);
802
803MODULE_DESCRIPTION("IPv6 AH transformation helpers");
804MODULE_LICENSE("GPL");
805MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);
v4.17
 
  1/*
  2 * Copyright (C)2002 USAGI/WIDE Project
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License as published by
  6 * the Free Software Foundation; either version 2 of the License, or
  7 * (at your option) any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 16 *
 17 * Authors
 18 *
 19 *	Mitsuru KANDA @USAGI       : IPv6 Support
 20 *	Kazunori MIYAZAWA @USAGI   :
 21 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 22 *
 23 *	This file is derived from net/ipv4/ah.c.
 24 */
 25
 26#define pr_fmt(fmt) "IPv6: " fmt
 27
 28#include <crypto/algapi.h>
  29#include <crypto/hash.h>
  30#include <linux/module.h>
 31#include <linux/slab.h>
 32#include <net/ip.h>
 33#include <net/ah.h>
 34#include <linux/crypto.h>
 35#include <linux/pfkeyv2.h>
 36#include <linux/string.h>
 37#include <linux/scatterlist.h>
 38#include <net/ip6_route.h>
 39#include <net/icmp.h>
 40#include <net/ipv6.h>
 41#include <net/protocol.h>
 42#include <net/xfrm.h>
 43
 44#define IPV6HDR_BASELEN 8
 45
 46struct tmp_ext {
 47#if IS_ENABLED(CONFIG_IPV6_MIP6)
 48		struct in6_addr saddr;
 49#endif
 50		struct in6_addr daddr;
 51		char hdrs[0];
 52};
 53
 54struct ah_skb_cb {
 55	struct xfrm_skb_cb xfrm;
 56	void *tmp;
 57};
 58
 59#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
 60
 61static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
 62			  unsigned int size)
 63{
 64	unsigned int len;
 65
 66	len = size + crypto_ahash_digestsize(ahash) +
 67	      (crypto_ahash_alignmask(ahash) &
 68	       ~(crypto_tfm_ctx_alignment() - 1));
 69
 70	len = ALIGN(len, crypto_tfm_ctx_alignment());
 71
 72	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
 73	len = ALIGN(len, __alignof__(struct scatterlist));
 74
 75	len += sizeof(struct scatterlist) * nfrags;
 76
 77	return kmalloc(len, GFP_ATOMIC);
 78}
 79
 80static inline struct tmp_ext *ah_tmp_ext(void *base)
 81{
 82	return base + IPV6HDR_BASELEN;
 83}
 84
 85static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
 86{
 87	return tmp + offset;
 88}
 89
 90static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
 91			     unsigned int offset)
 92{
 93	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
 94}
 95
 96static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
 97					       u8 *icv)
 98{
 99	struct ahash_request *req;
100
101	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
102				crypto_tfm_ctx_alignment());
103
104	ahash_request_set_tfm(req, ahash);
105
106	return req;
107}
108
109static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
110					     struct ahash_request *req)
111{
112	return (void *)ALIGN((unsigned long)(req + 1) +
113			     crypto_ahash_reqsize(ahash),
114			     __alignof__(struct scatterlist));
115}
116
117static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
118{
119	u8 *opt = (u8 *)opthdr;
120	int len = ipv6_optlen(opthdr);
121	int off = 0;
122	int optlen = 0;
123
124	off += 2;
125	len -= 2;
126
127	while (len > 0) {
128
129		switch (opt[off]) {
130
131		case IPV6_TLV_PAD1:
132			optlen = 1;
133			break;
134		default:
135			if (len < 2)
136				goto bad;
137			optlen = opt[off+1]+2;
138			if (len < optlen)
139				goto bad;
140			if (opt[off] & 0x20)
141				memset(&opt[off+2], 0, opt[off+1]);
142			break;
143		}
144
145		off += optlen;
146		len -= optlen;
147	}
148	if (len == 0)
149		return true;
150
151bad:
152	return false;
153}
154
155#if IS_ENABLED(CONFIG_IPV6_MIP6)
156/**
157 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
158 *	@iph: IPv6 header
 159 *	@destopt: destination options header
160 */
161static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
162{
163	u8 *opt = (u8 *)destopt;
164	int len = ipv6_optlen(destopt);
165	int off = 0;
166	int optlen = 0;
167
168	off += 2;
169	len -= 2;
170
171	while (len > 0) {
172
173		switch (opt[off]) {
174
175		case IPV6_TLV_PAD1:
176			optlen = 1;
177			break;
178		default:
179			if (len < 2)
180				goto bad;
181			optlen = opt[off+1]+2;
182			if (len < optlen)
183				goto bad;
184
185			/* Rearrange the source address in @iph and the
186			 * addresses in home address option for final source.
187			 * See 11.3.2 of RFC 3775 for details.
188			 */
189			if (opt[off] == IPV6_TLV_HAO) {
190				struct in6_addr final_addr;
191				struct ipv6_destopt_hao *hao;
192
193				hao = (struct ipv6_destopt_hao *)&opt[off];
194				if (hao->length != sizeof(hao->addr)) {
195					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
196							     hao->length);
197					goto bad;
198				}
199				final_addr = hao->addr;
200				hao->addr = iph->saddr;
201				iph->saddr = final_addr;
202			}
203			break;
204		}
205
206		off += optlen;
207		len -= optlen;
208	}
209	/* Note: ok if len == 0 */
210bad:
211	return;
212}
213#else
214static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
215#endif
216
217/**
218 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
219 *	@iph: IPv6 header
220 *	@rthdr: routing header
221 *
222 *	Rearrange the destination address in @iph and the addresses in @rthdr
223 *	so that they appear in the order they will at the final destination.
224 *	See Appendix A2 of RFC 2402 for details.
225 */
226static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
227{
228	int segments, segments_left;
229	struct in6_addr *addrs;
230	struct in6_addr final_addr;
231
232	segments_left = rthdr->segments_left;
233	if (segments_left == 0)
234		return;
235	rthdr->segments_left = 0;
236
237	/* The value of rthdr->hdrlen has been verified either by the system
238	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
239	 * packets.  So we can assume that it is even and that segments is
240	 * greater than or equal to segments_left.
241	 *
242	 * For the same reason we can assume that this option is of type 0.
243	 */
244	segments = rthdr->hdrlen >> 1;
245
246	addrs = ((struct rt0_hdr *)rthdr)->addr;
247	final_addr = addrs[segments - 1];
248
249	addrs += segments - segments_left;
250	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
251
252	addrs[0] = iph->daddr;
253	iph->daddr = final_addr;
254}
255
256static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
257{
258	union {
259		struct ipv6hdr *iph;
260		struct ipv6_opt_hdr *opth;
261		struct ipv6_rt_hdr *rth;
262		char *raw;
263	} exthdr = { .iph = iph };
264	char *end = exthdr.raw + len;
265	int nexthdr = iph->nexthdr;
266
267	exthdr.iph++;
268
269	while (exthdr.raw < end) {
270		switch (nexthdr) {
271		case NEXTHDR_DEST:
272			if (dir == XFRM_POLICY_OUT)
273				ipv6_rearrange_destopt(iph, exthdr.opth);
274			/* fall through */
275		case NEXTHDR_HOP:
276			if (!zero_out_mutable_opts(exthdr.opth)) {
277				net_dbg_ratelimited("overrun %sopts\n",
278						    nexthdr == NEXTHDR_HOP ?
279						    "hop" : "dest");
280				return -EINVAL;
281			}
282			break;
283
284		case NEXTHDR_ROUTING:
285			ipv6_rearrange_rthdr(iph, exthdr.rth);
286			break;
287
288		default:
289			return 0;
290		}
291
292		nexthdr = exthdr.opth->nexthdr;
293		exthdr.raw += ipv6_optlen(exthdr.opth);
294	}
295
296	return 0;
297}
298
299static void ah6_output_done(struct crypto_async_request *base, int err)
300{
301	int extlen;
302	u8 *iph_base;
303	u8 *icv;
304	struct sk_buff *skb = base->data;
305	struct xfrm_state *x = skb_dst(skb)->xfrm;
306	struct ah_data *ahp = x->data;
307	struct ipv6hdr *top_iph = ipv6_hdr(skb);
308	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
309	struct tmp_ext *iph_ext;
310
311	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
312	if (extlen)
313		extlen += sizeof(*iph_ext);
314
315	iph_base = AH_SKB_CB(skb)->tmp;
316	iph_ext = ah_tmp_ext(iph_base);
317	icv = ah_tmp_icv(ahp->ahash, iph_ext, extlen);
318
319	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
320	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
321
322	if (extlen) {
323#if IS_ENABLED(CONFIG_IPV6_MIP6)
324		memcpy(&top_iph->saddr, iph_ext, extlen);
325#else
326		memcpy(&top_iph->daddr, iph_ext, extlen);
327#endif
328	}
329
330	kfree(AH_SKB_CB(skb)->tmp);
331	xfrm_output_resume(skb, err);
332}
333
334static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
335{
336	int err;
337	int nfrags;
338	int extlen;
339	u8 *iph_base;
340	u8 *icv;
341	u8 nexthdr;
342	struct sk_buff *trailer;
343	struct crypto_ahash *ahash;
344	struct ahash_request *req;
345	struct scatterlist *sg;
346	struct ipv6hdr *top_iph;
347	struct ip_auth_hdr *ah;
348	struct ah_data *ahp;
349	struct tmp_ext *iph_ext;
350	int seqhi_len = 0;
351	__be32 *seqhi;
352	int sglists = 0;
353	struct scatterlist *seqhisg;
354
355	ahp = x->data;
356	ahash = ahp->ahash;
357
358	err = skb_cow_data(skb, 0, &trailer);
359	if (err < 0)
360		goto out;
361	nfrags = err;
362
363	skb_push(skb, -skb_network_offset(skb));
364	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
365	if (extlen)
366		extlen += sizeof(*iph_ext);
367
368	if (x->props.flags & XFRM_STATE_ESN) {
369		sglists = 1;
370		seqhi_len = sizeof(*seqhi);
371	}
372	err = -ENOMEM;
373	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
374				extlen + seqhi_len);
375	if (!iph_base)
376		goto out;
377
378	iph_ext = ah_tmp_ext(iph_base);
379	seqhi = (__be32 *)((char *)iph_ext + extlen);
380	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
381	req = ah_tmp_req(ahash, icv);
382	sg = ah_req_sg(ahash, req);
383	seqhisg = sg + nfrags;
384
385	ah = ip_auth_hdr(skb);
386	memset(ah->auth_data, 0, ahp->icv_trunc_len);
387
388	top_iph = ipv6_hdr(skb);
389	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
390
391	nexthdr = *skb_mac_header(skb);
392	*skb_mac_header(skb) = IPPROTO_AH;
393
394	/* When there are no extension headers, we only need to save the first
395	 * 8 bytes of the base IP header.
396	 */
397	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
398
399	if (extlen) {
400#if IS_ENABLED(CONFIG_IPV6_MIP6)
401		memcpy(iph_ext, &top_iph->saddr, extlen);
402#else
403		memcpy(iph_ext, &top_iph->daddr, extlen);
404#endif
405		err = ipv6_clear_mutable_options(top_iph,
406						 extlen - sizeof(*iph_ext) +
407						 sizeof(*top_iph),
408						 XFRM_POLICY_OUT);
409		if (err)
410			goto out_free;
411	}
412
413	ah->nexthdr = nexthdr;
414
415	top_iph->priority    = 0;
416	top_iph->flow_lbl[0] = 0;
417	top_iph->flow_lbl[1] = 0;
418	top_iph->flow_lbl[2] = 0;
419	top_iph->hop_limit   = 0;
420
421	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
422
423	ah->reserved = 0;
424	ah->spi = x->id.spi;
425	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
426
427	sg_init_table(sg, nfrags + sglists);
428	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
429	if (unlikely(err < 0))
430		goto out_free;
431
432	if (x->props.flags & XFRM_STATE_ESN) {
433		/* Attach seqhi sg right after packet payload */
434		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
435		sg_set_buf(seqhisg, seqhi, seqhi_len);
436	}
437	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
438	ahash_request_set_callback(req, 0, ah6_output_done, skb);
439
440	AH_SKB_CB(skb)->tmp = iph_base;
441
442	err = crypto_ahash_digest(req);
443	if (err) {
444		if (err == -EINPROGRESS)
445			goto out;
446
447		if (err == -ENOSPC)
448			err = NET_XMIT_DROP;
449		goto out_free;
450	}
451
452	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
453	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
454
455	if (extlen) {
456#if IS_ENABLED(CONFIG_IPV6_MIP6)
457		memcpy(&top_iph->saddr, iph_ext, extlen);
458#else
459		memcpy(&top_iph->daddr, iph_ext, extlen);
460#endif
461	}
462
463out_free:
464	kfree(iph_base);
465out:
466	return err;
467}
468
469static void ah6_input_done(struct crypto_async_request *base, int err)
470{
471	u8 *auth_data;
472	u8 *icv;
473	u8 *work_iph;
474	struct sk_buff *skb = base->data;
475	struct xfrm_state *x = xfrm_input_state(skb);
476	struct ah_data *ahp = x->data;
477	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
478	int hdr_len = skb_network_header_len(skb);
479	int ah_hlen = (ah->hdrlen + 2) << 2;
480
481	if (err)
482		goto out;
483
484	work_iph = AH_SKB_CB(skb)->tmp;
485	auth_data = ah_tmp_auth(work_iph, hdr_len);
486	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
487
488	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
489	if (err)
490		goto out;
491
492	err = ah->nexthdr;
493
494	skb->network_header += ah_hlen;
495	memcpy(skb_network_header(skb), work_iph, hdr_len);
496	__skb_pull(skb, ah_hlen + hdr_len);
497	if (x->props.mode == XFRM_MODE_TUNNEL)
498		skb_reset_transport_header(skb);
499	else
500		skb_set_transport_header(skb, -hdr_len);
501out:
502	kfree(AH_SKB_CB(skb)->tmp);
503	xfrm_input_resume(skb, err);
504}
505
506
507
508static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
509{
510	/*
511	 * Before process AH
512	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
513	 * |<-------------->| hdr_len
514	 *
515	 * To erase AH:
516	 * Keeping copy of cleared headers. After AH processing,
517	 * Moving the pointer of skb->network_header by using skb_pull as long
518	 * as AH header length. Then copy back the copy as long as hdr_len
519	 * If destination header following AH exists, copy it into after [Ext2].
520	 *
521	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
522	 * There is offset of AH before IPv6 header after the process.
523	 */
524
525	u8 *auth_data;
526	u8 *icv;
527	u8 *work_iph;
528	struct sk_buff *trailer;
529	struct crypto_ahash *ahash;
530	struct ahash_request *req;
531	struct scatterlist *sg;
532	struct ip_auth_hdr *ah;
533	struct ipv6hdr *ip6h;
534	struct ah_data *ahp;
535	u16 hdr_len;
536	u16 ah_hlen;
537	int nexthdr;
538	int nfrags;
539	int err = -ENOMEM;
540	int seqhi_len = 0;
541	__be32 *seqhi;
542	int sglists = 0;
543	struct scatterlist *seqhisg;
544
545	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
546		goto out;
547
548	/* We are going to _remove_ AH header to keep sockets happy,
549	 * so... Later this can change. */
550	if (skb_unclone(skb, GFP_ATOMIC))
551		goto out;
552
553	skb->ip_summed = CHECKSUM_NONE;
554
555	hdr_len = skb_network_header_len(skb);
556	ah = (struct ip_auth_hdr *)skb->data;
557	ahp = x->data;
558	ahash = ahp->ahash;
559
560	nexthdr = ah->nexthdr;
561	ah_hlen = (ah->hdrlen + 2) << 2;
562
563	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
564	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
565		goto out;
566
567	if (!pskb_may_pull(skb, ah_hlen))
568		goto out;
569
570	err = skb_cow_data(skb, 0, &trailer);
571	if (err < 0)
572		goto out;
573	nfrags = err;
574
575	ah = (struct ip_auth_hdr *)skb->data;
576	ip6h = ipv6_hdr(skb);
577
578	skb_push(skb, hdr_len);
579
580	if (x->props.flags & XFRM_STATE_ESN) {
581		sglists = 1;
582		seqhi_len = sizeof(*seqhi);
583	}
584
585	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
586				ahp->icv_trunc_len + seqhi_len);
587	if (!work_iph) {
588		err = -ENOMEM;
589		goto out;
590	}
591
592	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
593	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
594	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
595	req = ah_tmp_req(ahash, icv);
596	sg = ah_req_sg(ahash, req);
597	seqhisg = sg + nfrags;
598
599	memcpy(work_iph, ip6h, hdr_len);
600	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
601	memset(ah->auth_data, 0, ahp->icv_trunc_len);
602
 603	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
 604		goto out_free;
605
606	ip6h->priority    = 0;
607	ip6h->flow_lbl[0] = 0;
608	ip6h->flow_lbl[1] = 0;
609	ip6h->flow_lbl[2] = 0;
610	ip6h->hop_limit   = 0;
611
612	sg_init_table(sg, nfrags + sglists);
613	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
614	if (unlikely(err < 0))
615		goto out_free;
616
617	if (x->props.flags & XFRM_STATE_ESN) {
618		/* Attach seqhi sg right after packet payload */
619		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
620		sg_set_buf(seqhisg, seqhi, seqhi_len);
621	}
622
623	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
624	ahash_request_set_callback(req, 0, ah6_input_done, skb);
625
626	AH_SKB_CB(skb)->tmp = work_iph;
627
628	err = crypto_ahash_digest(req);
629	if (err) {
630		if (err == -EINPROGRESS)
631			goto out;
632
633		goto out_free;
634	}
635
636	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
637	if (err)
638		goto out_free;
639
640	skb->network_header += ah_hlen;
641	memcpy(skb_network_header(skb), work_iph, hdr_len);
642	__skb_pull(skb, ah_hlen + hdr_len);
643
644	if (x->props.mode == XFRM_MODE_TUNNEL)
645		skb_reset_transport_header(skb);
646	else
647		skb_set_transport_header(skb, -hdr_len);
648
649	err = nexthdr;
650
651out_free:
652	kfree(work_iph);
653out:
654	return err;
655}
656
657static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
658		   u8 type, u8 code, int offset, __be32 info)
659{
660	struct net *net = dev_net(skb->dev);
661	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
662	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
663	struct xfrm_state *x;
664
665	if (type != ICMPV6_PKT_TOOBIG &&
666	    type != NDISC_REDIRECT)
667		return 0;
668
669	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
670	if (!x)
671		return 0;
672
673	if (type == NDISC_REDIRECT)
674		ip6_redirect(skb, net, skb->dev->ifindex, 0,
675			     sock_net_uid(net, NULL));
676	else
677		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
678	xfrm_state_put(x);
679
680	return 0;
681}
682
683static int ah6_init_state(struct xfrm_state *x)
684{
685	struct ah_data *ahp = NULL;
686	struct xfrm_algo_desc *aalg_desc;
687	struct crypto_ahash *ahash;
688
 689	if (!x->aalg)
 690		goto error;
 691
 692	if (x->encap)
 693		goto error;
 694
695	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
696	if (!ahp)
697		return -ENOMEM;
698
699	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
 700	if (IS_ERR(ahash))
 701		goto error;
 702
703	ahp->ahash = ahash;
704	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
 705			       (x->aalg->alg_key_len + 7) / 8))
 706		goto error;
 707
708	/*
709	 * Lookup the algorithm description maintained by xfrm_algo,
710	 * verify crypto transform properties, and store information
711	 * we need for AH processing.  This lookup cannot fail here
712	 * after a successful crypto_alloc_hash().
713	 */
714	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
715	BUG_ON(!aalg_desc);
716
717	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
718	    crypto_ahash_digestsize(ahash)) {
719		pr_info("AH: %s digestsize %u != %hu\n",
720			x->aalg->alg_name, crypto_ahash_digestsize(ahash),
721			aalg_desc->uinfo.auth.icv_fullbits/8);
722		goto error;
723	}
724
725	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
726	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
727
728	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
729					  ahp->icv_trunc_len);
730	switch (x->props.mode) {
731	case XFRM_MODE_BEET:
732	case XFRM_MODE_TRANSPORT:
733		break;
734	case XFRM_MODE_TUNNEL:
735		x->props.header_len += sizeof(struct ipv6hdr);
736		break;
 737	default:
 738		goto error;
739	}
740	x->data = ahp;
741
742	return 0;
743
744error:
745	if (ahp) {
746		crypto_free_ahash(ahp->ahash);
747		kfree(ahp);
748	}
749	return -EINVAL;
750}
751
752static void ah6_destroy(struct xfrm_state *x)
753{
754	struct ah_data *ahp = x->data;
755
756	if (!ahp)
757		return;
758
759	crypto_free_ahash(ahp->ahash);
760	kfree(ahp);
761}
762
763static int ah6_rcv_cb(struct sk_buff *skb, int err)
764{
765	return 0;
766}
767
768static const struct xfrm_type ah6_type = {
769	.description	= "AH6",
770	.owner		= THIS_MODULE,
771	.proto		= IPPROTO_AH,
772	.flags		= XFRM_TYPE_REPLAY_PROT,
773	.init_state	= ah6_init_state,
774	.destructor	= ah6_destroy,
775	.input		= ah6_input,
776	.output		= ah6_output,
777	.hdr_offset	= xfrm6_find_1stfragopt,
778};
779
780static struct xfrm6_protocol ah6_protocol = {
 781	.handler	=	xfrm6_rcv,
 782	.cb_handler	=	ah6_rcv_cb,
783	.err_handler	=	ah6_err,
784	.priority	=	0,
785};
786
787static int __init ah6_init(void)
788{
789	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
790		pr_info("%s: can't add xfrm type\n", __func__);
791		return -EAGAIN;
792	}
793
794	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
795		pr_info("%s: can't add protocol\n", __func__);
796		xfrm_unregister_type(&ah6_type, AF_INET6);
797		return -EAGAIN;
798	}
799
800	return 0;
801}
802
803static void __exit ah6_fini(void)
804{
805	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
806		pr_info("%s: can't remove protocol\n", __func__);
807
808	if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
809		pr_info("%s: can't remove xfrm type\n", __func__);
810
811}
812
813module_init(ah6_init);
814module_exit(ah6_fini);
 815
 816MODULE_LICENSE("GPL");
817MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);