#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

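/*
 * Allocate one contiguous scratch buffer per packet: <size> bytes of
 * caller data, room for the ICV (aligned for the hash algorithm), an
 * ahash_request and nfrags scatterlist entries, in that order.  The
 * ah_tmp_*() helpers below locate the individual regions.
 */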
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}

/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int l = iph->ihl*4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

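/*
 * Completion callback for an asynchronous digest on output: copy the
 * computed ICV into the AH header and restore the mutable IP header
 * fields (TOS, TTL, frag_off and any options) that were saved in the
 * scratch buffer before the hash was started.
 */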
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

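/*
 * Transmit path: save and zero the mutable IPv4 header fields, build
 * the AH header (next header, length, SPI, sequence number), hash the
 * whole packet (plus the high-order sequence number word when ESN is
 * enabled) and write the truncated ICV into ah->auth_data.
 */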
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
	if (!iph)
		goto out;
	seqhi = (__be32 *)((char *)iph + ihl);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

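/*
 * Completion callback for an asynchronous digest on input: compare the
 * computed ICV against the value received in the AH header using the
 * constant-time crypto_memneq(), then strip the AH header and restore
 * the saved IP header before resuming the xfrm input path.
 */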
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}

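/*
 * Receive path: validate the AH header length, save the original IP
 * header and received ICV in a scratch buffer, zero the mutable fields
 * and the ICV in place, recompute the digest over the packet (plus the
 * ESN high word when enabled) and verify it before removing the AH
 * header.
 */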
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;


	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	seqhi = (__be32 *)((char *)work_iph + ihl);
	auth_data = ah_tmp_auth(seqhi, seqhi_len);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

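/*
 * ICMP error handler: for "fragmentation needed" and redirect messages
 * that refer to one of our AH SAs, update the cached path MTU or the
 * route for the destination.
 */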
static int ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
	xfrm_state_put(x);

	return 0;
}

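/*
 * Set up an AH SA: allocate the ahash transform named by the policy,
 * program its key, cross-check the digest size against the xfrm
 * algorithm description and precompute the header length (4- or
 * 8-byte aligned, plus an outer IP header in tunnel mode).
 */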
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type ah_type =
{
	.description = "AH4",
	.owner = THIS_MODULE,
	.proto = IPPROTO_AH,
	.flags = XFRM_TYPE_REPLAY_PROT,
	.init_state = ah_init_state,
	.destructor = ah_destroy,
	.input = ah_input,
	.output = ah_output
};

static struct xfrm4_protocol ah4_protocol = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = ah4_rcv_cb,
	.err_handler = ah4_err,
	.priority = 0,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);