v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C)2002 USAGI/WIDE Project
   4 *
   5 * Authors
   6 *
   7 *	Mitsuru KANDA @USAGI       : IPv6 Support
   8 *	Kazunori MIYAZAWA @USAGI   :
   9 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  10 *
  11 *	This file is derived from net/ipv4/esp.c
  12 */
  13
  14#define pr_fmt(fmt) "IPv6: " fmt
  15
  16#include <crypto/aead.h>
  17#include <crypto/authenc.h>
  18#include <linux/err.h>
  19#include <linux/module.h>
  20#include <net/ip.h>
  21#include <net/xfrm.h>
  22#include <net/esp.h>
  23#include <linux/scatterlist.h>
  24#include <linux/kernel.h>
  25#include <linux/pfkeyv2.h>
  26#include <linux/random.h>
  27#include <linux/slab.h>
  28#include <linux/spinlock.h>
  29#include <net/ip6_checksum.h>
  30#include <net/ip6_route.h>
  31#include <net/icmp.h>
  32#include <net/ipv6.h>
  33#include <net/protocol.h>
  34#include <net/udp.h>
  35#include <linux/icmpv6.h>
  36#include <net/tcp.h>
  37#include <net/espintcp.h>
  38#include <net/inet6_hashtables.h>
  39
  40#include <linux/highmem.h>
  41
  42struct esp_skb_cb {
  43	struct xfrm_skb_cb xfrm;
  44	void *tmp;
  45};
  46
  47struct esp_output_extra {
  48	__be32 seqhi;
  49	u32 esphoff;
  50};
  51
  52#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
  53
  54/*
  55 * Allocate an AEAD request structure with extra space for SG and IV.
  56 *
  57 * For alignment considerations the upper 32 bits of the sequence number are
  58 * placed at the front, if present. Followed by the IV, the request and finally
  59 * the SG list.
  60 *
  61 * TODO: Use spare space in skb for this where possible.
  62 */
  63static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
  64{
  65	unsigned int len;
  66
  67	len = seqihlen;
  68
  69	len += crypto_aead_ivsize(aead);
  70
  71	if (len) {
  72		len += crypto_aead_alignmask(aead) &
  73		       ~(crypto_tfm_ctx_alignment() - 1);
  74		len = ALIGN(len, crypto_tfm_ctx_alignment());
  75	}
  76
  77	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
  78	len = ALIGN(len, __alignof__(struct scatterlist));
  79
  80	len += sizeof(struct scatterlist) * nfrags;
  81
  82	return kmalloc(len, GFP_ATOMIC);
  83}
  84
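      /* Resulting layout of the scratch buffer (a sketch; exact padding
       * depends on the algorithm's alignment mask and on
       * crypto_tfm_ctx_alignment()):
       *
       *   [seqhi/extra (ESN only)][IV][aead_request + req ctx][sg[nfrags]]
       *
       * The esp_tmp_*() helpers below recompute these offsets on demand, so
       * only the single kmalloc'd block has to be carried around.
       */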
  85static inline void *esp_tmp_extra(void *tmp)
  86{
  87	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
  88}
  89
  90static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
  91{
  92	return crypto_aead_ivsize(aead) ?
  93	       PTR_ALIGN((u8 *)tmp + seqhilen,
  94			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
  95}
  96
  97static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
  98{
  99	struct aead_request *req;
 100
 101	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
 102				crypto_tfm_ctx_alignment());
 103	aead_request_set_tfm(req, aead);
 104	return req;
 105}
 106
 107static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
 108					     struct aead_request *req)
 109{
 110	return (void *)ALIGN((unsigned long)(req + 1) +
 111			     crypto_aead_reqsize(aead),
 112			     __alignof__(struct scatterlist));
 113}
 114
 115static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 116{
 117	struct esp_output_extra *extra = esp_tmp_extra(tmp);
 118	struct crypto_aead *aead = x->data;
 119	int extralen = 0;
 120	u8 *iv;
 121	struct aead_request *req;
 122	struct scatterlist *sg;
 123
 124	if (x->props.flags & XFRM_STATE_ESN)
 125		extralen += sizeof(*extra);
 126
 127	iv = esp_tmp_iv(aead, tmp, extralen);
 128	req = esp_tmp_req(aead, iv);
 129
 130	/* Unref skb_frag_pages in the src scatterlist if necessary.
 131	 * Skip the first sg which comes from skb->data.
 132	 */
 133	if (req->src != req->dst)
 134		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
 135			put_page(sg_page(sg));
 136}
 137
 138#ifdef CONFIG_INET6_ESPINTCP
 139struct esp_tcp_sk {
 140	struct sock *sk;
 141	struct rcu_head rcu;
 142};
 143
 144static void esp_free_tcp_sk(struct rcu_head *head)
 145{
 146	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
 147
 148	sock_put(esk->sk);
 149	kfree(esk);
 150}
 151
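      /* Look up, and cache in x->encap_sk, the TCP socket carrying this SA's
       * RFC 8229 (TCP encapsulation) stream.  A stale cached socket is
       * detached under x->lock and released via RCU, then the established
       * hash is consulted again using the encap template's ports.
       */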
 152static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
 153{
 154	struct xfrm_encap_tmpl *encap = x->encap;
 155	struct esp_tcp_sk *esk;
 156	__be16 sport, dport;
 157	struct sock *nsk;
 158	struct sock *sk;
 159
 160	sk = rcu_dereference(x->encap_sk);
 161	if (sk && sk->sk_state == TCP_ESTABLISHED)
 162		return sk;
 163
 164	spin_lock_bh(&x->lock);
 165	sport = encap->encap_sport;
 166	dport = encap->encap_dport;
 167	nsk = rcu_dereference_protected(x->encap_sk,
 168					lockdep_is_held(&x->lock));
 169	if (sk && sk == nsk) {
 170		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
 171		if (!esk) {
 172			spin_unlock_bh(&x->lock);
 173			return ERR_PTR(-ENOMEM);
 174		}
 175		RCU_INIT_POINTER(x->encap_sk, NULL);
 176		esk->sk = sk;
 177		call_rcu(&esk->rcu, esp_free_tcp_sk);
 178	}
 179	spin_unlock_bh(&x->lock);
 180
 181	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
 182					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
 183	if (!sk)
 184		return ERR_PTR(-ENOENT);
 185
 186	if (!tcp_is_ulp_esp(sk)) {
 187		sock_put(sk);
 188		return ERR_PTR(-EINVAL);
 189	}
 190
 191	spin_lock_bh(&x->lock);
 192	nsk = rcu_dereference_protected(x->encap_sk,
 193					lockdep_is_held(&x->lock));
 194	if (encap->encap_sport != sport ||
 195	    encap->encap_dport != dport) {
 196		sock_put(sk);
 197		sk = nsk ?: ERR_PTR(-EREMCHG);
 198	} else if (sk == nsk) {
 199		sock_put(sk);
 200	} else {
 201		rcu_assign_pointer(x->encap_sk, sk);
 202	}
 203	spin_unlock_bh(&x->lock);
 204
 205	return sk;
 206}
 207
 208static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 209{
 210	struct sock *sk;
 211	int err;
 212
 213	rcu_read_lock();
 214
 215	sk = esp6_find_tcp_sk(x);
 216	err = PTR_ERR_OR_ZERO(sk);
 217	if (err)
 218		goto out;
 219
 220	bh_lock_sock(sk);
 221	if (sock_owned_by_user(sk))
 222		err = espintcp_queue_out(sk, skb);
 223	else
 224		err = espintcp_push_skb(sk, skb);
 225	bh_unlock_sock(sk);
 226
 227out:
 228	rcu_read_unlock();
 229	return err;
 230}
 231
 232static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
 233				   struct sk_buff *skb)
 234{
 235	struct dst_entry *dst = skb_dst(skb);
 236	struct xfrm_state *x = dst->xfrm;
 237
 238	return esp_output_tcp_finish(x, skb);
 239}
 240
 241static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
 242{
 243	int err;
 244
 245	local_bh_disable();
 246	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
 247	local_bh_enable();
 248
 249	/* EINPROGRESS just happens to do the right thing.  It
 250	 * actually means that the skb has been consumed and
 251	 * isn't coming back.
 252	 */
 253	return err ?: -EINPROGRESS;
 254}
 255#else
 256static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
 257{
 258	kfree_skb(skb);
 259
 260	return -EOPNOTSUPP;
 261}
 262#endif
 263
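      /* IPv6 does not allow a zero UDP checksum (RFC 8200, section 8.1), so
       * for ESP-in-UDP the checksum has to be computed here, once encryption
       * has produced the final payload bytes.
       */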
 264static void esp_output_encap_csum(struct sk_buff *skb)
 265{
 266	/* UDP encap with IPv6 requires a valid checksum */
 267	if (*skb_mac_header(skb) == IPPROTO_UDP) {
 268		struct udphdr *uh = udp_hdr(skb);
 269		struct ipv6hdr *ip6h = ipv6_hdr(skb);
 270		int len = ntohs(uh->len);
 271		unsigned int offset = skb_transport_offset(skb);
 272		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);
 273
 274		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 275					    len, IPPROTO_UDP, csum);
 276		if (uh->check == 0)
 277			uh->check = CSUM_MANGLED_0;
 278	}
 279}
 280
 281static void esp_output_done(struct crypto_async_request *base, int err)
 282{
 283	struct sk_buff *skb = base->data;
 284	struct xfrm_offload *xo = xfrm_offload(skb);
 285	void *tmp;
 286	struct xfrm_state *x;
 287
 288	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
 289		struct sec_path *sp = skb_sec_path(skb);
 290
 291		x = sp->xvec[sp->len - 1];
 292	} else {
 293		x = skb_dst(skb)->xfrm;
 294	}
 295
 296	tmp = ESP_SKB_CB(skb)->tmp;
 297	esp_ssg_unref(x, tmp);
 298	kfree(tmp);
 299
 300	esp_output_encap_csum(skb);
 301
 302	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
 303		if (err) {
 304			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
 305			kfree_skb(skb);
 306			return;
 307		}
 308
 309		skb_push(skb, skb->data - skb_mac_header(skb));
 310		secpath_reset(skb);
 311		xfrm_dev_resume(skb);
 312	} else {
 313		if (!err &&
 314		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
 315			esp_output_tail_tcp(x, skb);
 316		else
 317			xfrm_output_resume(skb->sk, skb, err);
 318	}
 319}
 320
 321/* Move ESP header back into place. */
 322static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 323{
 324	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
 325	void *tmp = ESP_SKB_CB(skb)->tmp;
 326	__be32 *seqhi = esp_tmp_extra(tmp);
 327
 328	esph->seq_no = esph->spi;
 329	esph->spi = *seqhi;
 330}
 331
 332static void esp_output_restore_header(struct sk_buff *skb)
 333{
 334	void *tmp = ESP_SKB_CB(skb)->tmp;
 335	struct esp_output_extra *extra = esp_tmp_extra(tmp);
 336
 337	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
 338				sizeof(__be32));
 339}
 340
 341static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
 342					     struct xfrm_state *x,
 343					     struct ip_esp_hdr *esph,
 344					     struct esp_output_extra *extra)
 345{
 346	/* For ESN we move the header forward by 4 bytes to
  347	 * accommodate the high bits.  We will move it back after
 348	 * encryption.
 349	 */
 350	if ((x->props.flags & XFRM_STATE_ESN)) {
 351		__u32 seqhi;
 352		struct xfrm_offload *xo = xfrm_offload(skb);
 353
 354		if (xo)
 355			seqhi = xo->seq.hi;
 356		else
 357			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
 358
 359		extra->esphoff = (unsigned char *)esph -
 360				 skb_transport_header(skb);
 361		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
 362		extra->seqhi = esph->spi;
 363		esph->seq_no = htonl(seqhi);
 364	}
 365
 366	esph->spi = x->id.spi;
 367
 368	return esph;
 369}
 370
 371static void esp_output_done_esn(struct crypto_async_request *base, int err)
 372{
 373	struct sk_buff *skb = base->data;
 374
 375	esp_output_restore_header(skb);
 376	esp_output_done(base, err);
 377}
 378
 379static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
 380					       int encap_type,
 381					       struct esp_info *esp,
 382					       __be16 sport,
 383					       __be16 dport)
 384{
 385	struct udphdr *uh;
 386	__be32 *udpdata32;
 387	unsigned int len;
 388
 389	len = skb->len + esp->tailen - skb_transport_offset(skb);
 390	if (len > U16_MAX)
 391		return ERR_PTR(-EMSGSIZE);
 392
 393	uh = (struct udphdr *)esp->esph;
 394	uh->source = sport;
 395	uh->dest = dport;
 396	uh->len = htons(len);
 397	uh->check = 0;
 398
 399	*skb_mac_header(skb) = IPPROTO_UDP;
 400
 401	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
 402		udpdata32 = (__be32 *)(uh + 1);
 403		udpdata32[0] = udpdata32[1] = 0;
 404		return (struct ip_esp_hdr *)(udpdata32 + 2);
 405	}
 406
 407	return (struct ip_esp_hdr *)(uh + 1);
 408}
 409
 410#ifdef CONFIG_INET6_ESPINTCP
 411static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
 412						struct sk_buff *skb,
 413						struct esp_info *esp)
 414{
 415	__be16 *lenp = (void *)esp->esph;
 416	struct ip_esp_hdr *esph;
 417	unsigned int len;
 418	struct sock *sk;
 419
 420	len = skb->len + esp->tailen - skb_transport_offset(skb);
 421	if (len > IP_MAX_MTU)
 422		return ERR_PTR(-EMSGSIZE);
 423
 424	rcu_read_lock();
 425	sk = esp6_find_tcp_sk(x);
 426	rcu_read_unlock();
 427
 428	if (IS_ERR(sk))
 429		return ERR_CAST(sk);
 430
 431	*lenp = htons(len);
 432	esph = (struct ip_esp_hdr *)(lenp + 1);
 433
 434	return esph;
 435}
 436#else
 437static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
 438						struct sk_buff *skb,
 439						struct esp_info *esp)
 440{
 441	return ERR_PTR(-EOPNOTSUPP);
 442}
 443#endif
 444
 445static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 446			    struct esp_info *esp)
 447{
 448	struct xfrm_encap_tmpl *encap = x->encap;
 449	struct ip_esp_hdr *esph;
 450	__be16 sport, dport;
 451	int encap_type;
 452
 453	spin_lock_bh(&x->lock);
 454	sport = encap->encap_sport;
 455	dport = encap->encap_dport;
 456	encap_type = encap->encap_type;
 457	spin_unlock_bh(&x->lock);
 458
 459	switch (encap_type) {
 460	default:
 461	case UDP_ENCAP_ESPINUDP:
 462	case UDP_ENCAP_ESPINUDP_NON_IKE:
 463		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
 464		break;
 465	case TCP_ENCAP_ESPINTCP:
 466		esph = esp6_output_tcp_encap(x, skb, esp);
 467		break;
 468	}
 469
 470	if (IS_ERR(esph))
 471		return PTR_ERR(esph);
 472
 473	esp->esph = esph;
 474
 475	return 0;
 476}
 477
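      /* Build the ESP trailer (TFC padding, self-describing padding, pad
       * length, next header, plus room for the ICV).  The fast paths place
       * it in existing tailroom or in a page fragment appended to the skb;
       * otherwise the skb is expanded with skb_cow_data().  Returns the
       * number of fragments the crypto layer will have to map, or a
       * negative errno.
       */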
 478int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 479{
 480	u8 *tail;
 481	int nfrags;
 482	int esph_offset;
 483	struct page *page;
 484	struct sk_buff *trailer;
 485	int tailen = esp->tailen;
 486
 487	if (x->encap) {
 488		int err = esp6_output_encap(x, skb, esp);
 489
 490		if (err < 0)
 491			return err;
 492	}
 493
 494	if (!skb_cloned(skb)) {
 495		if (tailen <= skb_tailroom(skb)) {
 496			nfrags = 1;
 497			trailer = skb;
 498			tail = skb_tail_pointer(trailer);
 499
 500			goto skip_cow;
 501		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
 502			   && !skb_has_frag_list(skb)) {
 503			int allocsize;
 504			struct sock *sk = skb->sk;
 505			struct page_frag *pfrag = &x->xfrag;
 506
 507			esp->inplace = false;
 508
 509			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
 510
 511			spin_lock_bh(&x->lock);
 512
 513			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 514				spin_unlock_bh(&x->lock);
 515				goto cow;
 516			}
 517
 518			page = pfrag->page;
 519			get_page(page);
 520
 521			tail = page_address(page) + pfrag->offset;
 522
 523			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 524
 525			nfrags = skb_shinfo(skb)->nr_frags;
 526
 527			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
 528					     tailen);
 529			skb_shinfo(skb)->nr_frags = ++nfrags;
 530
 531			pfrag->offset = pfrag->offset + allocsize;
 532
 533			spin_unlock_bh(&x->lock);
 534
 535			nfrags++;
 536
 537			skb->len += tailen;
 538			skb->data_len += tailen;
 539			skb->truesize += tailen;
 540			if (sk && sk_fullsock(sk))
 541				refcount_add(tailen, &sk->sk_wmem_alloc);
 542
 543			goto out;
 544		}
 545	}
 546
 547cow:
 548	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
 549
 550	nfrags = skb_cow_data(skb, tailen, &trailer);
 551	if (nfrags < 0)
 552		goto out;
 553	tail = skb_tail_pointer(trailer);
 554	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 555
 556skip_cow:
 557	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 558	pskb_put(skb, trailer, tailen);
 559
 560out:
 561	return nfrags;
 562}
 563EXPORT_SYMBOL_GPL(esp6_output_head);
 564
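      /* Do the actual encryption: map the packet into scatterlists, program
       * the AEAD request (associated data is the ESP header, extended by the
       * high sequence bits for ESN) and hand it to the crypto layer.
       * -EINPROGRESS means an async algorithm has taken ownership of the skb.
       */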
 565int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 566{
 567	u8 *iv;
 568	int alen;
 569	void *tmp;
 570	int ivlen;
 571	int assoclen;
 572	int extralen;
 573	struct page *page;
 574	struct ip_esp_hdr *esph;
 575	struct aead_request *req;
 576	struct crypto_aead *aead;
 577	struct scatterlist *sg, *dsg;
 578	struct esp_output_extra *extra;
 579	int err = -ENOMEM;
 580
 581	assoclen = sizeof(struct ip_esp_hdr);
 582	extralen = 0;
 583
 584	if (x->props.flags & XFRM_STATE_ESN) {
 585		extralen += sizeof(*extra);
 586		assoclen += sizeof(__be32);
 587	}
 588
 589	aead = x->data;
 590	alen = crypto_aead_authsize(aead);
 591	ivlen = crypto_aead_ivsize(aead);
 592
 593	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
 594	if (!tmp)
 595		goto error;
 596
 597	extra = esp_tmp_extra(tmp);
 598	iv = esp_tmp_iv(aead, tmp, extralen);
 599	req = esp_tmp_req(aead, iv);
 600	sg = esp_req_sg(aead, req);
 601
 602	if (esp->inplace)
 603		dsg = sg;
 604	else
 605		dsg = &sg[esp->nfrags];
 606
 607	esph = esp_output_set_esn(skb, x, esp->esph, extra);
 608	esp->esph = esph;
 609
 610	sg_init_table(sg, esp->nfrags);
 611	err = skb_to_sgvec(skb, sg,
 612		           (unsigned char *)esph - skb->data,
 613		           assoclen + ivlen + esp->clen + alen);
 614	if (unlikely(err < 0))
 615		goto error_free;
 616
 617	if (!esp->inplace) {
 618		int allocsize;
 619		struct page_frag *pfrag = &x->xfrag;
 620
 621		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
 622
 623		spin_lock_bh(&x->lock);
 624		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 625			spin_unlock_bh(&x->lock);
 626			goto error_free;
 627		}
 628
 629		skb_shinfo(skb)->nr_frags = 1;
 630
 631		page = pfrag->page;
 632		get_page(page);
 633		/* replace page frags in skb with new page */
 634		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
 635		pfrag->offset = pfrag->offset + allocsize;
 636		spin_unlock_bh(&x->lock);
 637
 638		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
 639		err = skb_to_sgvec(skb, dsg,
 640			           (unsigned char *)esph - skb->data,
 641			           assoclen + ivlen + esp->clen + alen);
 642		if (unlikely(err < 0))
 643			goto error_free;
 644	}
 645
 646	if ((x->props.flags & XFRM_STATE_ESN))
 647		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 648	else
 649		aead_request_set_callback(req, 0, esp_output_done, skb);
 650
 651	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 652	aead_request_set_ad(req, assoclen);
 653
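      	/* Deterministic IV: the big-endian 64-bit sequence number is copied
      	 * into the tail of the IV buffer; ciphers with an IV shorter than
      	 * eight bytes keep only its least significant bytes.
      	 */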
 654	memset(iv, 0, ivlen);
 655	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 656	       min(ivlen, 8));
 657
 658	ESP_SKB_CB(skb)->tmp = tmp;
 659	err = crypto_aead_encrypt(req);
 660
 661	switch (err) {
 662	case -EINPROGRESS:
 663		goto error;
 664
 665	case -ENOSPC:
 666		err = NET_XMIT_DROP;
 667		break;
 668
 669	case 0:
 670		if ((x->props.flags & XFRM_STATE_ESN))
 671			esp_output_restore_header(skb);
 672		esp_output_encap_csum(skb);
 673	}
 674
 675	if (sg != dsg)
 676		esp_ssg_unref(x, tmp);
 677
 678	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
 679		err = esp_output_tail_tcp(x, skb);
 680
 681error_free:
 682	kfree(tmp);
 683error:
 684	return err;
 685}
 686EXPORT_SYMBOL_GPL(esp6_output_tail);
 687
 688static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 689{
 690	int alen;
 691	int blksize;
 692	struct ip_esp_hdr *esph;
 693	struct crypto_aead *aead;
 694	struct esp_info esp;
 695
 696	esp.inplace = true;
 697
 698	esp.proto = *skb_mac_header(skb);
 699	*skb_mac_header(skb) = IPPROTO_ESP;
 700
 701	/* skb is pure payload to encrypt */
 702
 703	aead = x->data;
 704	alen = crypto_aead_authsize(aead);
 705
 706	esp.tfclen = 0;
 707	if (x->tfcpad) {
 708		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
 709		u32 padto;
 710
 711		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
 712		if (skb->len < padto)
 713			esp.tfclen = padto - skb->len;
 714	}
 715	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 716	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
 717	esp.plen = esp.clen - skb->len - esp.tfclen;
 718	esp.tailen = esp.tfclen + esp.plen + alen;
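      	/* Example with illustrative numbers: a 100-byte payload, no TFC
      	 * padding and a 16-byte cipher block give clen = ALIGN(102, 16) =
      	 * 112, hence plen = 12 (10 pad bytes + pad length + next header)
      	 * and tailen = 12 + ICV size.
      	 */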
 719
 720	esp.esph = ip_esp_hdr(skb);
 721
 722	esp.nfrags = esp6_output_head(x, skb, &esp);
 723	if (esp.nfrags < 0)
 724		return esp.nfrags;
 725
 726	esph = esp.esph;
 727	esph->spi = x->id.spi;
 728
 729	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 730	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
 731			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 732
 733	skb_push(skb, -skb_network_offset(skb));
 734
 735	return esp6_output_tail(x, skb, &esp);
 736}
 737
 738static inline int esp_remove_trailer(struct sk_buff *skb)
 739{
 740	struct xfrm_state *x = xfrm_input_state(skb);
 741	struct xfrm_offload *xo = xfrm_offload(skb);
 742	struct crypto_aead *aead = x->data;
 743	int alen, hlen, elen;
 744	int padlen, trimlen;
 745	__wsum csumdiff;
 746	u8 nexthdr[2];
 747	int ret;
 748
 749	alen = crypto_aead_authsize(aead);
 750	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 751	elen = skb->len - hlen;
 752
 753	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
 754		ret = xo->proto;
 755		goto out;
 756	}
 757
 758	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
 759	BUG_ON(ret);
 760
 761	ret = -EINVAL;
 762	padlen = nexthdr[0];
 763	if (padlen + 2 + alen >= elen) {
 764		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
 765				    padlen + 2, elen - alen);
 766		goto out;
 767	}
 768
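      	/* Trim [padding | pad length | next header | ICV] off the tail; for
      	 * CHECKSUM_COMPLETE the checksum of the removed bytes is subtracted
      	 * first so skb->csum stays valid.
      	 */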
 769	trimlen = alen + padlen + 2;
 770	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 771		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
 772		skb->csum = csum_block_sub(skb->csum, csumdiff,
 773					   skb->len - trimlen);
 774	}
 775	pskb_trim(skb, skb->len - trimlen);
 776
 777	ret = nexthdr[1];
 778
 779out:
 780	return ret;
 781}
 782
 783int esp6_input_done2(struct sk_buff *skb, int err)
 784{
 785	struct xfrm_state *x = xfrm_input_state(skb);
 786	struct xfrm_offload *xo = xfrm_offload(skb);
 787	struct crypto_aead *aead = x->data;
 788	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 789	int hdr_len = skb_network_header_len(skb);
 790
 791	if (!xo || !(xo->flags & CRYPTO_DONE))
 792		kfree(ESP_SKB_CB(skb)->tmp);
 793
 794	if (unlikely(err))
 795		goto out;
 796
 797	err = esp_remove_trailer(skb);
 798	if (unlikely(err < 0))
 799		goto out;
 800
 801	if (x->encap) {
 802		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 803		int offset = skb_network_offset(skb) + sizeof(*ip6h);
 804		struct xfrm_encap_tmpl *encap = x->encap;
 805		u8 nexthdr = ip6h->nexthdr;
 806		__be16 frag_off, source;
 807		struct udphdr *uh;
 808		struct tcphdr *th;
 809
 810		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
 811		uh = (void *)(skb->data + offset);
 812		th = (void *)(skb->data + offset);
 813		hdr_len += offset;
 814
 815		switch (x->encap->encap_type) {
 816		case TCP_ENCAP_ESPINTCP:
 817			source = th->source;
 818			break;
 819		case UDP_ENCAP_ESPINUDP:
 820		case UDP_ENCAP_ESPINUDP_NON_IKE:
 821			source = uh->source;
 822			break;
 823		default:
 824			WARN_ON_ONCE(1);
 825			err = -EINVAL;
 826			goto out;
 827		}
 828
 829		/*
 830		 * 1) if the NAT-T peer's IP or port changed then
  831	 *    advertise the change to the keying daemon.
 832		 *    This is an inbound SA, so just compare
 833		 *    SRC ports.
 834		 */
 835		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
 836		    source != encap->encap_sport) {
 837			xfrm_address_t ipaddr;
 838
 839			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
 840			km_new_mapping(x, &ipaddr, source);
 841
 842			/* XXX: perhaps add an extra
 843			 * policy check here, to see
 844			 * if we should allow or
 845			 * reject a packet from a
 846			 * different source
 847			 * address/port.
 848			 */
 849		}
 850
 851		/*
 852		 * 2) ignore UDP/TCP checksums in case
 853		 *    of NAT-T in Transport Mode, or
 854		 *    perform other post-processing fixes
 855		 *    as per draft-ietf-ipsec-udp-encaps-06,
 856		 *    section 3.1.2
 857		 */
 858		if (x->props.mode == XFRM_MODE_TRANSPORT)
 859			skb->ip_summed = CHECKSUM_UNNECESSARY;
 860	}
 861
 862	skb_postpull_rcsum(skb, skb_network_header(skb),
 863			   skb_network_header_len(skb));
 864	skb_pull_rcsum(skb, hlen);
 865	if (x->props.mode == XFRM_MODE_TUNNEL)
 866		skb_reset_transport_header(skb);
 867	else
 868		skb_set_transport_header(skb, -hdr_len);
 869
 870	/* RFC4303: Drop dummy packets without any error */
 871	if (err == IPPROTO_NONE)
 872		err = -EINVAL;
 873
 874out:
 875	return err;
 876}
 877EXPORT_SYMBOL_GPL(esp6_input_done2);
 878
 879static void esp_input_done(struct crypto_async_request *base, int err)
 880{
 881	struct sk_buff *skb = base->data;
 882
 883	xfrm_input_resume(skb, esp6_input_done2(skb, err));
 884}
 885
 886static void esp_input_restore_header(struct sk_buff *skb)
 887{
 888	esp_restore_header(skb, 0);
 889	__skb_pull(skb, 4);
 890}
 891
 892static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
 893{
 894	struct xfrm_state *x = xfrm_input_state(skb);
 895
 896	/* For ESN we move the header forward by 4 bytes to
  897	 * accommodate the high bits.  We will move it back after
 898	 * decryption.
 899	 */
 900	if ((x->props.flags & XFRM_STATE_ESN)) {
 901		struct ip_esp_hdr *esph = skb_push(skb, 4);
 902
 903		*seqhi = esph->spi;
 904		esph->spi = esph->seq_no;
 905		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 906	}
 907}
 908
 909static void esp_input_done_esn(struct crypto_async_request *base, int err)
 910{
 911	struct sk_buff *skb = base->data;
 912
 913	esp_input_restore_header(skb);
 914	esp_input_done(base, err);
 915}
 916
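      /* Decrypt one ESP packet: validate the lengths, avoid copying when the
       * skb is neither cloned nor carrying a frag list, then queue the AEAD
       * decryption.  Trailer removal and NAT-T fixups happen in
       * esp6_input_done2(), possibly from the async completion callback.
       */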
 917static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 918{
 919	struct crypto_aead *aead = x->data;
 920	struct aead_request *req;
 921	struct sk_buff *trailer;
 922	int ivlen = crypto_aead_ivsize(aead);
 923	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
 924	int nfrags;
 925	int assoclen;
 926	int seqhilen;
 927	int ret = 0;
 928	void *tmp;
 929	__be32 *seqhi;
 930	u8 *iv;
 931	struct scatterlist *sg;
 932
 933	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
 934		ret = -EINVAL;
 935		goto out;
 936	}
 937
 938	if (elen <= 0) {
 939		ret = -EINVAL;
 940		goto out;
 941	}
 942
 943	assoclen = sizeof(struct ip_esp_hdr);
 944	seqhilen = 0;
 945
 946	if (x->props.flags & XFRM_STATE_ESN) {
 947		seqhilen += sizeof(__be32);
 948		assoclen += seqhilen;
 949	}
 950
 951	if (!skb_cloned(skb)) {
 952		if (!skb_is_nonlinear(skb)) {
 953			nfrags = 1;
 954
 955			goto skip_cow;
 956		} else if (!skb_has_frag_list(skb)) {
 957			nfrags = skb_shinfo(skb)->nr_frags;
 958			nfrags++;
 959
 960			goto skip_cow;
 961		}
 962	}
 963
 964	nfrags = skb_cow_data(skb, 0, &trailer);
 965	if (nfrags < 0) {
 966		ret = -EINVAL;
 967		goto out;
 968	}
 969
 970skip_cow:
 971	ret = -ENOMEM;
 972	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
 973	if (!tmp)
 974		goto out;
 975
 976	ESP_SKB_CB(skb)->tmp = tmp;
 977	seqhi = esp_tmp_extra(tmp);
 978	iv = esp_tmp_iv(aead, tmp, seqhilen);
 979	req = esp_tmp_req(aead, iv);
 980	sg = esp_req_sg(aead, req);
 981
 982	esp_input_set_header(skb, seqhi);
 983
 984	sg_init_table(sg, nfrags);
 985	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 986	if (unlikely(ret < 0)) {
 987		kfree(tmp);
 988		goto out;
 989	}
 990
 991	skb->ip_summed = CHECKSUM_NONE;
 992
 993	if ((x->props.flags & XFRM_STATE_ESN))
 994		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 995	else
 996		aead_request_set_callback(req, 0, esp_input_done, skb);
 997
 998	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
 999	aead_request_set_ad(req, assoclen);
1000
1001	ret = crypto_aead_decrypt(req);
1002	if (ret == -EINPROGRESS)
1003		goto out;
1004
1005	if ((x->props.flags & XFRM_STATE_ESN))
1006		esp_input_restore_header(skb);
1007
1008	ret = esp6_input_done2(skb, ret);
1009
1010out:
1011	return ret;
1012}
1013
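      /* ICMPv6 error handler: only Packet Too Big and redirect messages are
       * acted upon, by updating the path MTU or the cached route for the
       * matching state; everything else is ignored.
       */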
1014static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1015		    u8 type, u8 code, int offset, __be32 info)
1016{
1017	struct net *net = dev_net(skb->dev);
1018	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
1019	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
1020	struct xfrm_state *x;
1021
1022	if (type != ICMPV6_PKT_TOOBIG &&
1023	    type != NDISC_REDIRECT)
1024		return 0;
1025
1026	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
1027			      esph->spi, IPPROTO_ESP, AF_INET6);
1028	if (!x)
1029		return 0;
1030
1031	if (type == NDISC_REDIRECT)
1032		ip6_redirect(skb, net, skb->dev->ifindex, 0,
1033			     sock_net_uid(net, NULL));
1034	else
1035		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
1036	xfrm_state_put(x);
1037
1038	return 0;
1039}
1040
1041static void esp6_destroy(struct xfrm_state *x)
1042{
1043	struct crypto_aead *aead = x->data;
1044
1045	if (!aead)
1046		return;
1047
1048	crypto_free_aead(aead);
1049}
1050
1051static int esp_init_aead(struct xfrm_state *x)
1052{
1053	char aead_name[CRYPTO_MAX_ALG_NAME];
1054	struct crypto_aead *aead;
1055	int err;
1056
1057	err = -ENAMETOOLONG;
1058	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
1059		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
1060		goto error;
1061
1062	aead = crypto_alloc_aead(aead_name, 0, 0);
1063	err = PTR_ERR(aead);
1064	if (IS_ERR(aead))
1065		goto error;
1066
1067	x->data = aead;
1068
1069	err = crypto_aead_setkey(aead, x->aead->alg_key,
1070				 (x->aead->alg_key_len + 7) / 8);
1071	if (err)
1072		goto error;
1073
1074	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
1075	if (err)
1076		goto error;
1077
1078error:
1079	return err;
1080}
1081
1082static int esp_init_authenc(struct xfrm_state *x)
1083{
1084	struct crypto_aead *aead;
1085	struct crypto_authenc_key_param *param;
1086	struct rtattr *rta;
1087	char *key;
1088	char *p;
1089	char authenc_name[CRYPTO_MAX_ALG_NAME];
1090	unsigned int keylen;
1091	int err;
1092
1093	err = -EINVAL;
1094	if (!x->ealg)
1095		goto error;
1096
1097	err = -ENAMETOOLONG;
1098
1099	if ((x->props.flags & XFRM_STATE_ESN)) {
1100		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1101			     "%s%sauthencesn(%s,%s)%s",
1102			     x->geniv ?: "", x->geniv ? "(" : "",
1103			     x->aalg ? x->aalg->alg_name : "digest_null",
1104			     x->ealg->alg_name,
1105			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
1106			goto error;
1107	} else {
1108		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1109			     "%s%sauthenc(%s,%s)%s",
1110			     x->geniv ?: "", x->geniv ? "(" : "",
1111			     x->aalg ? x->aalg->alg_name : "digest_null",
1112			     x->ealg->alg_name,
1113			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
1114			goto error;
1115	}
1116
1117	aead = crypto_alloc_aead(authenc_name, 0, 0);
1118	err = PTR_ERR(aead);
1119	if (IS_ERR(aead))
1120		goto error;
1121
1122	x->data = aead;
1123
1124	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
1125		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
1126	err = -ENOMEM;
1127	key = kmalloc(keylen, GFP_KERNEL);
1128	if (!key)
1129		goto error;
1130
1131	p = key;
1132	rta = (void *)p;
1133	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1134	rta->rta_len = RTA_LENGTH(sizeof(*param));
1135	param = RTA_DATA(rta);
1136	p += RTA_SPACE(sizeof(*param));
1137
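      	/* The authenc key blob is serialized as
      	 *   [rtattr CRYPTO_AUTHENC_KEYA_PARAM { enckeylen }]
      	 *   [authentication key][encryption key]
      	 * which is the layout crypto_authenc_extractkeys() expects.
      	 */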
1138	if (x->aalg) {
1139		struct xfrm_algo_desc *aalg_desc;
1140
1141		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
1142		p += (x->aalg->alg_key_len + 7) / 8;
1143
1144		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
1145		BUG_ON(!aalg_desc);
1146
1147		err = -EINVAL;
1148		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
1149		    crypto_aead_authsize(aead)) {
1150			pr_info("ESP: %s digestsize %u != %u\n",
1151				x->aalg->alg_name,
1152				crypto_aead_authsize(aead),
1153				aalg_desc->uinfo.auth.icv_fullbits / 8);
1154			goto free_key;
1155		}
1156
1157		err = crypto_aead_setauthsize(
1158			aead, x->aalg->alg_trunc_len / 8);
1159		if (err)
1160			goto free_key;
1161	}
1162
1163	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
1164	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
1165
1166	err = crypto_aead_setkey(aead, key, keylen);
1167
1168free_key:
1169	kfree(key);
1170
1171error:
1172	return err;
1173}
1174
1175static int esp6_init_state(struct xfrm_state *x)
1176{
1177	struct crypto_aead *aead;
1178	u32 align;
1179	int err;
1180
1181	x->data = NULL;
1182
1183	if (x->aead)
1184		err = esp_init_aead(x);
1185	else
1186		err = esp_init_authenc(x);
1187
1188	if (err)
1189		goto error;
1190
1191	aead = x->data;
1192
1193	x->props.header_len = sizeof(struct ip_esp_hdr) +
1194			      crypto_aead_ivsize(aead);
1195	switch (x->props.mode) {
1196	case XFRM_MODE_BEET:
1197		if (x->sel.family != AF_INET6)
1198			x->props.header_len += IPV4_BEET_PHMAXLEN +
1199					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
1200		break;
1201	default:
1202	case XFRM_MODE_TRANSPORT:
1203		break;
1204	case XFRM_MODE_TUNNEL:
1205		x->props.header_len += sizeof(struct ipv6hdr);
1206		break;
1207	}
1208
1209	if (x->encap) {
1210		struct xfrm_encap_tmpl *encap = x->encap;
1211
1212		switch (encap->encap_type) {
1213		default:
1214			err = -EINVAL;
1215			goto error;
1216		case UDP_ENCAP_ESPINUDP:
1217			x->props.header_len += sizeof(struct udphdr);
1218			break;
1219		case UDP_ENCAP_ESPINUDP_NON_IKE:
1220			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
1221			break;
1222#ifdef CONFIG_INET6_ESPINTCP
1223		case TCP_ENCAP_ESPINTCP:
1224			/* only the length field, TCP encap is done by
1225			 * the socket
1226			 */
1227			x->props.header_len += 2;
1228			break;
1229#endif
1230		}
1231	}
1232
1233	align = ALIGN(crypto_aead_blocksize(aead), 4);
1234	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
1235
1236error:
1237	return err;
1238}
1239
1240static int esp6_rcv_cb(struct sk_buff *skb, int err)
1241{
1242	return 0;
1243}
1244
1245static const struct xfrm_type esp6_type = {
1246	.owner		= THIS_MODULE,
1247	.proto		= IPPROTO_ESP,
1248	.flags		= XFRM_TYPE_REPLAY_PROT,
1249	.init_state	= esp6_init_state,
1250	.destructor	= esp6_destroy,
1251	.input		= esp6_input,
1252	.output		= esp6_output,
1253};
1254
1255static struct xfrm6_protocol esp6_protocol = {
1256	.handler	=	xfrm6_rcv,
1257	.input_handler	=	xfrm_input,
1258	.cb_handler	=	esp6_rcv_cb,
1259	.err_handler	=	esp6_err,
1260	.priority	=	0,
1261};
1262
1263static int __init esp6_init(void)
1264{
1265	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
1266		pr_info("%s: can't add xfrm type\n", __func__);
1267		return -EAGAIN;
1268	}
1269	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
1270		pr_info("%s: can't add protocol\n", __func__);
1271		xfrm_unregister_type(&esp6_type, AF_INET6);
1272		return -EAGAIN;
1273	}
1274
1275	return 0;
1276}
1277
1278static void __exit esp6_fini(void)
1279{
1280	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
1281		pr_info("%s: can't remove protocol\n", __func__);
1282	xfrm_unregister_type(&esp6_type, AF_INET6);
1283}
1284
1285module_init(esp6_init);
1286module_exit(esp6_fini);
1287
1288MODULE_LICENSE("GPL");
1289MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C)2002 USAGI/WIDE Project
   4 *
   5 * Authors
   6 *
   7 *	Mitsuru KANDA @USAGI       : IPv6 Support
   8 *	Kazunori MIYAZAWA @USAGI   :
   9 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  10 *
  11 *	This file is derived from net/ipv4/esp.c
  12 */
  13
  14#define pr_fmt(fmt) "IPv6: " fmt
  15
  16#include <crypto/aead.h>
  17#include <crypto/authenc.h>
  18#include <linux/err.h>
  19#include <linux/module.h>
  20#include <net/ip.h>
  21#include <net/xfrm.h>
  22#include <net/esp.h>
  23#include <linux/scatterlist.h>
  24#include <linux/kernel.h>
  25#include <linux/pfkeyv2.h>
  26#include <linux/random.h>
  27#include <linux/slab.h>
  28#include <linux/spinlock.h>
  29#include <net/ip6_checksum.h>
  30#include <net/ip6_route.h>
  31#include <net/icmp.h>
  32#include <net/ipv6.h>
  33#include <net/protocol.h>
  34#include <net/udp.h>
  35#include <linux/icmpv6.h>
  36#include <net/tcp.h>
  37#include <net/espintcp.h>
  38#include <net/inet6_hashtables.h>
  39#include <linux/skbuff_ref.h>
  40
  41#include <linux/highmem.h>
  42
  43struct esp_skb_cb {
  44	struct xfrm_skb_cb xfrm;
  45	void *tmp;
  46};
  47
  48struct esp_output_extra {
  49	__be32 seqhi;
  50	u32 esphoff;
  51};
  52
  53#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
  54
  55/*
  56 * Allocate an AEAD request structure with extra space for SG and IV.
  57 *
  58 * For alignment considerations the upper 32 bits of the sequence number are
  59 * placed at the front, if present. Followed by the IV, the request and finally
  60 * the SG list.
  61 *
  62 * TODO: Use spare space in skb for this where possible.
  63 */
  64static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
  65{
  66	unsigned int len;
  67
  68	len = seqihlen;
  69
  70	len += crypto_aead_ivsize(aead);
  71
  72	if (len) {
  73		len += crypto_aead_alignmask(aead) &
  74		       ~(crypto_tfm_ctx_alignment() - 1);
  75		len = ALIGN(len, crypto_tfm_ctx_alignment());
  76	}
  77
  78	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
  79	len = ALIGN(len, __alignof__(struct scatterlist));
  80
  81	len += sizeof(struct scatterlist) * nfrags;
  82
  83	return kmalloc(len, GFP_ATOMIC);
  84}
  85
  86static inline void *esp_tmp_extra(void *tmp)
  87{
  88	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
  89}
  90
  91static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
  92{
  93	return crypto_aead_ivsize(aead) ?
  94	       PTR_ALIGN((u8 *)tmp + seqhilen,
  95			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
  96}
  97
  98static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
  99{
 100	struct aead_request *req;
 101
 102	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
 103				crypto_tfm_ctx_alignment());
 104	aead_request_set_tfm(req, aead);
 105	return req;
 106}
 107
 108static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
 109					     struct aead_request *req)
 110{
 111	return (void *)ALIGN((unsigned long)(req + 1) +
 112			     crypto_aead_reqsize(aead),
 113			     __alignof__(struct scatterlist));
 114}
 115
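      /* This variant also takes the skb, so that skb_page_unref() can honour
       * page_pool recycling (skb->pp_recycle) when dropping the references
       * on the skb's frag pages.
       */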
 116static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
 117{
 118	struct crypto_aead *aead = x->data;
 119	int extralen = 0;
 120	u8 *iv;
 121	struct aead_request *req;
 122	struct scatterlist *sg;
 123
 124	if (x->props.flags & XFRM_STATE_ESN)
 125		extralen += sizeof(struct esp_output_extra);
 126
 127	iv = esp_tmp_iv(aead, tmp, extralen);
 128	req = esp_tmp_req(aead, iv);
 129
 130	/* Unref skb_frag_pages in the src scatterlist if necessary.
 131	 * Skip the first sg which comes from skb->data.
 132	 */
 133	if (req->src != req->dst)
 134		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
 135			skb_page_unref(page_to_netmem(sg_page(sg)),
 136				       skb->pp_recycle);
 137}
 138
 139#ifdef CONFIG_INET6_ESPINTCP
 140struct esp_tcp_sk {
 141	struct sock *sk;
 142	struct rcu_head rcu;
 143};
 144
 145static void esp_free_tcp_sk(struct rcu_head *head)
 146{
 147	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);
 148
 149	sock_put(esk->sk);
 150	kfree(esk);
 151}
 152
 153static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
 154{
 155	struct xfrm_encap_tmpl *encap = x->encap;
 156	struct net *net = xs_net(x);
 157	struct esp_tcp_sk *esk;
 158	__be16 sport, dport;
 159	struct sock *nsk;
 160	struct sock *sk;
 161
 162	sk = rcu_dereference(x->encap_sk);
 163	if (sk && sk->sk_state == TCP_ESTABLISHED)
 164		return sk;
 165
 166	spin_lock_bh(&x->lock);
 167	sport = encap->encap_sport;
 168	dport = encap->encap_dport;
 169	nsk = rcu_dereference_protected(x->encap_sk,
 170					lockdep_is_held(&x->lock));
 171	if (sk && sk == nsk) {
 172		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
 173		if (!esk) {
 174			spin_unlock_bh(&x->lock);
 175			return ERR_PTR(-ENOMEM);
 176		}
 177		RCU_INIT_POINTER(x->encap_sk, NULL);
 178		esk->sk = sk;
 179		call_rcu(&esk->rcu, esp_free_tcp_sk);
 180	}
 181	spin_unlock_bh(&x->lock);
 182
 183	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6,
 184					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
 185	if (!sk)
 186		return ERR_PTR(-ENOENT);
 187
 188	if (!tcp_is_ulp_esp(sk)) {
 189		sock_put(sk);
 190		return ERR_PTR(-EINVAL);
 191	}
 192
 193	spin_lock_bh(&x->lock);
 194	nsk = rcu_dereference_protected(x->encap_sk,
 195					lockdep_is_held(&x->lock));
 196	if (encap->encap_sport != sport ||
 197	    encap->encap_dport != dport) {
 198		sock_put(sk);
 199		sk = nsk ?: ERR_PTR(-EREMCHG);
 200	} else if (sk == nsk) {
 201		sock_put(sk);
 202	} else {
 203		rcu_assign_pointer(x->encap_sk, sk);
 204	}
 205	spin_unlock_bh(&x->lock);
 206
 207	return sk;
 208}
 209
 210static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
 211{
 212	struct sock *sk;
 213	int err;
 214
 215	rcu_read_lock();
 216
 217	sk = esp6_find_tcp_sk(x);
 218	err = PTR_ERR_OR_ZERO(sk);
 219	if (err)
 220		goto out;
 221
 222	bh_lock_sock(sk);
 223	if (sock_owned_by_user(sk))
 224		err = espintcp_queue_out(sk, skb);
 225	else
 226		err = espintcp_push_skb(sk, skb);
 227	bh_unlock_sock(sk);
 228
 229out:
 230	rcu_read_unlock();
 231	return err;
 232}
 233
 234static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
 235				   struct sk_buff *skb)
 236{
 237	struct dst_entry *dst = skb_dst(skb);
 238	struct xfrm_state *x = dst->xfrm;
 239
 240	return esp_output_tcp_finish(x, skb);
 241}
 242
 243static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
 244{
 245	int err;
 246
 247	local_bh_disable();
 248	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
 249	local_bh_enable();
 250
 251	/* EINPROGRESS just happens to do the right thing.  It
 252	 * actually means that the skb has been consumed and
 253	 * isn't coming back.
 254	 */
 255	return err ?: -EINPROGRESS;
 256}
 257#else
 258static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
 259{
 260	WARN_ON(1);
 261	return -EOPNOTSUPP;
 262}
 263#endif
 264
 265static void esp_output_encap_csum(struct sk_buff *skb)
 266{
 267	/* UDP encap with IPv6 requires a valid checksum */
 268	if (*skb_mac_header(skb) == IPPROTO_UDP) {
 269		struct udphdr *uh = udp_hdr(skb);
 270		struct ipv6hdr *ip6h = ipv6_hdr(skb);
 271		int len = ntohs(uh->len);
 272		unsigned int offset = skb_transport_offset(skb);
 273		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);
 274
 275		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 276					    len, IPPROTO_UDP, csum);
 277		if (uh->check == 0)
 278			uh->check = CSUM_MANGLED_0;
 279	}
 280}
 281
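      /* Since the v5.14.15 listing above, crypto completion callbacks
       * receive the request's data pointer (void *data) directly instead of
       * a struct crypto_async_request.
       */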
 282static void esp_output_done(void *data, int err)
 283{
 284	struct sk_buff *skb = data;
 285	struct xfrm_offload *xo = xfrm_offload(skb);
 286	void *tmp;
 287	struct xfrm_state *x;
 288
 289	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
 290		struct sec_path *sp = skb_sec_path(skb);
 291
 292		x = sp->xvec[sp->len - 1];
 293	} else {
 294		x = skb_dst(skb)->xfrm;
 295	}
 296
 297	tmp = ESP_SKB_CB(skb)->tmp;
 298	esp_ssg_unref(x, tmp, skb);
 299	kfree(tmp);
 300
 301	esp_output_encap_csum(skb);
 302
 303	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
 304		if (err) {
 305			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
 306			kfree_skb(skb);
 307			return;
 308		}
 309
 310		skb_push(skb, skb->data - skb_mac_header(skb));
 311		secpath_reset(skb);
 312		xfrm_dev_resume(skb);
 313	} else {
 314		if (!err &&
 315		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
 316			esp_output_tail_tcp(x, skb);
 317		else
 318			xfrm_output_resume(skb->sk, skb, err);
 319	}
 320}
 321
 322/* Move ESP header back into place. */
 323static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 324{
 325	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
 326	void *tmp = ESP_SKB_CB(skb)->tmp;
 327	__be32 *seqhi = esp_tmp_extra(tmp);
 328
 329	esph->seq_no = esph->spi;
 330	esph->spi = *seqhi;
 331}
 332
 333static void esp_output_restore_header(struct sk_buff *skb)
 334{
 335	void *tmp = ESP_SKB_CB(skb)->tmp;
 336	struct esp_output_extra *extra = esp_tmp_extra(tmp);
 337
 338	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
 339				sizeof(__be32));
 340}
 341
 342static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
 343					     struct xfrm_state *x,
 344					     struct ip_esp_hdr *esph,
 345					     struct esp_output_extra *extra)
 346{
 347	/* For ESN we move the header forward by 4 bytes to
 348	 * accommodate the high bits.  We will move it back after
 349	 * encryption.
 350	 */
 351	if ((x->props.flags & XFRM_STATE_ESN)) {
 352		__u32 seqhi;
 353		struct xfrm_offload *xo = xfrm_offload(skb);
 354
 355		if (xo)
 356			seqhi = xo->seq.hi;
 357		else
 358			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
 359
 360		extra->esphoff = (unsigned char *)esph -
 361				 skb_transport_header(skb);
 362		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
 363		extra->seqhi = esph->spi;
 364		esph->seq_no = htonl(seqhi);
 365	}
 366
 367	esph->spi = x->id.spi;
 368
 369	return esph;
 370}
 371
 372static void esp_output_done_esn(void *data, int err)
 373{
 374	struct sk_buff *skb = data;
 375
 376	esp_output_restore_header(skb);
 377	esp_output_done(data, err);
 378}
 379
 380static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
 381					       int encap_type,
 382					       struct esp_info *esp,
 383					       __be16 sport,
 384					       __be16 dport)
 385{
 386	struct udphdr *uh;
 387	unsigned int len;
 388
 389	len = skb->len + esp->tailen - skb_transport_offset(skb);
 390	if (len > U16_MAX)
 391		return ERR_PTR(-EMSGSIZE);
 392
 393	uh = (struct udphdr *)esp->esph;
 394	uh->source = sport;
 395	uh->dest = dport;
 396	uh->len = htons(len);
 397	uh->check = 0;
 398
 399	*skb_mac_header(skb) = IPPROTO_UDP;
 400
 401	return (struct ip_esp_hdr *)(uh + 1);
 402}
 403
 404#ifdef CONFIG_INET6_ESPINTCP
 405static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
 406						struct sk_buff *skb,
 407						struct esp_info *esp)
 408{
 409	__be16 *lenp = (void *)esp->esph;
 410	struct ip_esp_hdr *esph;
 411	unsigned int len;
 412	struct sock *sk;
 413
 414	len = skb->len + esp->tailen - skb_transport_offset(skb);
 415	if (len > IP_MAX_MTU)
 416		return ERR_PTR(-EMSGSIZE);
 417
 418	rcu_read_lock();
 419	sk = esp6_find_tcp_sk(x);
 420	rcu_read_unlock();
 421
 422	if (IS_ERR(sk))
 423		return ERR_CAST(sk);
 424
 425	*lenp = htons(len);
 426	esph = (struct ip_esp_hdr *)(lenp + 1);
 427
 428	return esph;
 429}
 430#else
 431static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
 432						struct sk_buff *skb,
 433						struct esp_info *esp)
 434{
 435	return ERR_PTR(-EOPNOTSUPP);
 436}
 437#endif
 438
 439static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 440			    struct esp_info *esp)
 441{
 442	struct xfrm_encap_tmpl *encap = x->encap;
 443	struct ip_esp_hdr *esph;
 444	__be16 sport, dport;
 445	int encap_type;
 446
 447	spin_lock_bh(&x->lock);
 448	sport = encap->encap_sport;
 449	dport = encap->encap_dport;
 450	encap_type = encap->encap_type;
 451	spin_unlock_bh(&x->lock);
 452
 453	switch (encap_type) {
 454	default:
 455	case UDP_ENCAP_ESPINUDP:
 456		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
 457		break;
 458	case TCP_ENCAP_ESPINTCP:
 459		esph = esp6_output_tcp_encap(x, skb, esp);
 460		break;
 461	}
 462
 463	if (IS_ERR(esph))
 464		return PTR_ERR(esph);
 465
 466	esp->esph = esph;
 467
 468	return 0;
 469}
 470
 471int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 472{
 473	u8 *tail;
 474	int nfrags;
 475	int esph_offset;
 476	struct page *page;
 477	struct sk_buff *trailer;
 478	int tailen = esp->tailen;
 479
 480	if (x->encap) {
 481		int err = esp6_output_encap(x, skb, esp);
 482
 483		if (err < 0)
 484			return err;
 485	}
 486
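      /* Not present in v5.14.15: tail or out-of-place buffers larger than a
       * page cannot be served by the page-frag allocator used below, so the
       * copy-on-write path is forced up front.
       */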
 487	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
 488	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
 489		goto cow;
 490
 491	if (!skb_cloned(skb)) {
 492		if (tailen <= skb_tailroom(skb)) {
 493			nfrags = 1;
 494			trailer = skb;
 495			tail = skb_tail_pointer(trailer);
 496
 497			goto skip_cow;
 498		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
 499			   && !skb_has_frag_list(skb)) {
 500			int allocsize;
 501			struct sock *sk = skb->sk;
 502			struct page_frag *pfrag = &x->xfrag;
 503
 504			esp->inplace = false;
 505
 506			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
 507
 508			spin_lock_bh(&x->lock);
 509
 510			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 511				spin_unlock_bh(&x->lock);
 512				goto cow;
 513			}
 514
 515			page = pfrag->page;
 516			get_page(page);
 517
 518			tail = page_address(page) + pfrag->offset;
 519
 520			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 521
 522			nfrags = skb_shinfo(skb)->nr_frags;
 523
 524			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
 525					     tailen);
 526			skb_shinfo(skb)->nr_frags = ++nfrags;
 527
 528			pfrag->offset = pfrag->offset + allocsize;
 529
 530			spin_unlock_bh(&x->lock);
 531
 532			nfrags++;
 533
 534			skb->len += tailen;
 535			skb->data_len += tailen;
 536			skb->truesize += tailen;
 537			if (sk && sk_fullsock(sk))
 538				refcount_add(tailen, &sk->sk_wmem_alloc);
 539
 540			goto out;
 541		}
 542	}
 543
 544cow:
 545	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
 546
 547	nfrags = skb_cow_data(skb, tailen, &trailer);
 548	if (nfrags < 0)
 549		goto out;
 550	tail = skb_tail_pointer(trailer);
 551	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
 552
 553skip_cow:
 554	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 555	pskb_put(skb, trailer, tailen);
 556
 557out:
 558	return nfrags;
 559}
 560EXPORT_SYMBOL_GPL(esp6_output_head);
 561
 562int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 563{
 564	u8 *iv;
 565	int alen;
 566	void *tmp;
 567	int ivlen;
 568	int assoclen;
 569	int extralen;
 570	struct page *page;
 571	struct ip_esp_hdr *esph;
 572	struct aead_request *req;
 573	struct crypto_aead *aead;
 574	struct scatterlist *sg, *dsg;
 575	struct esp_output_extra *extra;
 576	int err = -ENOMEM;
 577
 578	assoclen = sizeof(struct ip_esp_hdr);
 579	extralen = 0;
 580
 581	if (x->props.flags & XFRM_STATE_ESN) {
 582		extralen += sizeof(*extra);
 583		assoclen += sizeof(__be32);
 584	}
 585
 586	aead = x->data;
 587	alen = crypto_aead_authsize(aead);
 588	ivlen = crypto_aead_ivsize(aead);
 589
 590	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
 591	if (!tmp)
 592		goto error;
 593
 594	extra = esp_tmp_extra(tmp);
 595	iv = esp_tmp_iv(aead, tmp, extralen);
 596	req = esp_tmp_req(aead, iv);
 597	sg = esp_req_sg(aead, req);
 598
 599	if (esp->inplace)
 600		dsg = sg;
 601	else
 602		dsg = &sg[esp->nfrags];
 603
 604	esph = esp_output_set_esn(skb, x, esp->esph, extra);
 605	esp->esph = esph;
 606
 607	sg_init_table(sg, esp->nfrags);
 608	err = skb_to_sgvec(skb, sg,
 609		           (unsigned char *)esph - skb->data,
 610		           assoclen + ivlen + esp->clen + alen);
 611	if (unlikely(err < 0))
 612		goto error_free;
 613
 614	if (!esp->inplace) {
 615		int allocsize;
 616		struct page_frag *pfrag = &x->xfrag;
 617
 618		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
 619
 620		spin_lock_bh(&x->lock);
 621		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
 622			spin_unlock_bh(&x->lock);
 623			goto error_free;
 624		}
 625
 626		skb_shinfo(skb)->nr_frags = 1;
 627
 628		page = pfrag->page;
 629		get_page(page);
 630		/* replace page frags in skb with new page */
 631		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
 632		pfrag->offset = pfrag->offset + allocsize;
 633		spin_unlock_bh(&x->lock);
 634
 635		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
 636		err = skb_to_sgvec(skb, dsg,
 637			           (unsigned char *)esph - skb->data,
 638			           assoclen + ivlen + esp->clen + alen);
 639		if (unlikely(err < 0))
 640			goto error_free;
 641	}
 642
 643	if ((x->props.flags & XFRM_STATE_ESN))
 644		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 645	else
 646		aead_request_set_callback(req, 0, esp_output_done, skb);
 647
 648	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 649	aead_request_set_ad(req, assoclen);
 650
 651	memset(iv, 0, ivlen);
 652	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 653	       min(ivlen, 8));
 654
 655	ESP_SKB_CB(skb)->tmp = tmp;
 656	err = crypto_aead_encrypt(req);
 657
 658	switch (err) {
 659	case -EINPROGRESS:
 660		goto error;
 661
 662	case -ENOSPC:
 663		err = NET_XMIT_DROP;
 664		break;
 665
 666	case 0:
 667		if ((x->props.flags & XFRM_STATE_ESN))
 668			esp_output_restore_header(skb);
 669		esp_output_encap_csum(skb);
 670	}
 671
 672	if (sg != dsg)
 673		esp_ssg_unref(x, tmp, skb);
 674
 675	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
 676		err = esp_output_tail_tcp(x, skb);
 677
 678error_free:
 679	kfree(tmp);
 680error:
 681	return err;
 682}
 683EXPORT_SYMBOL_GPL(esp6_output_tail);
 684
 685static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 686{
 687	int alen;
 688	int blksize;
 689	struct ip_esp_hdr *esph;
 690	struct crypto_aead *aead;
 691	struct esp_info esp;
 692
 693	esp.inplace = true;
 694
 695	esp.proto = *skb_mac_header(skb);
 696	*skb_mac_header(skb) = IPPROTO_ESP;
 697
 698	/* skb is pure payload to encrypt */
 699
 700	aead = x->data;
 701	alen = crypto_aead_authsize(aead);
 702
 703	esp.tfclen = 0;
 704	if (x->tfcpad) {
 705		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
 706		u32 padto;
 707
 708		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
 709		if (skb->len < padto)
 710			esp.tfclen = padto - skb->len;
 711	}
 712	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 713	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
 714	esp.plen = esp.clen - skb->len - esp.tfclen;
 715	esp.tailen = esp.tfclen + esp.plen + alen;
 716
 717	esp.esph = ip_esp_hdr(skb);
 718
 719	esp.nfrags = esp6_output_head(x, skb, &esp);
 720	if (esp.nfrags < 0)
 721		return esp.nfrags;
 722
 723	esph = esp.esph;
 724	esph->spi = x->id.spi;
 725
 726	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 727	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
 728			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 729
 730	skb_push(skb, -skb_network_offset(skb));
 731
 732	return esp6_output_tail(x, skb, &esp);
 733}
 734
 735static inline int esp_remove_trailer(struct sk_buff *skb)
 736{
 737	struct xfrm_state *x = xfrm_input_state(skb);
 738	struct crypto_aead *aead = x->data;
 739	int alen, hlen, elen;
 740	int padlen, trimlen;
 741	__wsum csumdiff;
 742	u8 nexthdr[2];
 743	int ret;
 744
 745	alen = crypto_aead_authsize(aead);
 746	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 747	elen = skb->len - hlen;
 748
 749	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
 750	BUG_ON(ret);
 751
 752	ret = -EINVAL;
 753	padlen = nexthdr[0];
 754	if (padlen + 2 + alen >= elen) {
 755		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
 756				    padlen + 2, elen - alen);
 757		goto out;
 758	}
 759
 760	trimlen = alen + padlen + 2;
 761	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 762		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
 763		skb->csum = csum_block_sub(skb->csum, csumdiff,
 764					   skb->len - trimlen);
 765	}
 766	ret = pskb_trim(skb, skb->len - trimlen);
 767	if (unlikely(ret))
 768		return ret;
 769
 770	ret = nexthdr[1];
 771
 772out:
 773	return ret;
 774}
 775
 776int esp6_input_done2(struct sk_buff *skb, int err)
 777{
 778	struct xfrm_state *x = xfrm_input_state(skb);
 779	struct xfrm_offload *xo = xfrm_offload(skb);
 780	struct crypto_aead *aead = x->data;
 781	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
 782	int hdr_len = skb_network_header_len(skb);
 783
 784	if (!xo || !(xo->flags & CRYPTO_DONE))
 785		kfree(ESP_SKB_CB(skb)->tmp);
 786
 787	if (unlikely(err))
 788		goto out;
 789
 790	err = esp_remove_trailer(skb);
 791	if (unlikely(err < 0))
 792		goto out;
 793
 794	if (x->encap) {
 795		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 796		int offset = skb_network_offset(skb) + sizeof(*ip6h);
 797		struct xfrm_encap_tmpl *encap = x->encap;
 798		u8 nexthdr = ip6h->nexthdr;
 799		__be16 frag_off, source;
 800		struct udphdr *uh;
 801		struct tcphdr *th;
 802
 803		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
 804		if (offset == -1) {
 805			err = -EINVAL;
 806			goto out;
 807		}
 808
 809		uh = (void *)(skb->data + offset);
 810		th = (void *)(skb->data + offset);
 811		hdr_len += offset;
 812
 813		switch (x->encap->encap_type) {
 814		case TCP_ENCAP_ESPINTCP:
 815			source = th->source;
 816			break;
 817		case UDP_ENCAP_ESPINUDP:
 818			source = uh->source;
 819			break;
 820		default:
 821			WARN_ON_ONCE(1);
 822			err = -EINVAL;
 823			goto out;
 824		}
 825
 826		/*
 827		 * 1) If the NAT-T peer's IP address or source port
 828		 *    changed, advertise the change to the keying
 829		 *    daemon.  This is an inbound SA, so only the
 830		 *    source port is compared.
 831		 */
 832		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
 833		    source != encap->encap_sport) {
 834			xfrm_address_t ipaddr;
 835
 836			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
 837			km_new_mapping(x, &ipaddr, source);
 838
 839			/* XXX: perhaps add an extra
 840			 * policy check here, to see
 841			 * if we should allow or
 842			 * reject a packet from a
 843			 * different source
 844			 * address/port.
 845			 */
 846		}
 847
 848		/*
 849		 * 2) ignore UDP/TCP checksums in case
 850		 *    of NAT-T in Transport Mode, or
 851		 *    perform other post-processing fixes
 852		 *    as per draft-ietf-ipsec-udp-encaps-06,
 853		 *    section 3.1.2
 854		 */
 855		if (x->props.mode == XFRM_MODE_TRANSPORT)
 856			skb->ip_summed = CHECKSUM_UNNECESSARY;
 857	}
 858
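	/* Fold the IPv6 header out of the hardware checksum, then pull
	 * the ESP header and IV while keeping skb->csum in sync.
	 */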
 859	skb_postpull_rcsum(skb, skb_network_header(skb),
 860			   skb_network_header_len(skb));
 861	skb_pull_rcsum(skb, hlen);
 862	if (x->props.mode == XFRM_MODE_TUNNEL)
 863		skb_reset_transport_header(skb);
 864	else
 865		skb_set_transport_header(skb, -hdr_len);
 866
 867	/* RFC 4303: drop dummy packets (next header IPPROTO_NONE) without reporting an error to the sender */
 868	if (err == IPPROTO_NONE)
 869		err = -EINVAL;
 870
 871out:
 872	return err;
 873}
 874EXPORT_SYMBOL_GPL(esp6_input_done2);
 875
 876static void esp_input_done(void *data, int err)
 877{
 878	struct sk_buff *skb = data;
 879
 880	xfrm_input_resume(skb, esp6_input_done2(skb, err));
 881}
 882
 883static void esp_input_restore_header(struct sk_buff *skb)
 884{
 885	esp_restore_header(skb, 0);
 886	__skb_pull(skb, 4);
 887}
 888
 889static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
 890{
 891	struct xfrm_state *x = xfrm_input_state(skb);
 892
 893	/* For ESN we move the header forward by 4 bytes to
 894	 * accommodate the high 32 bits of the sequence number.
 895	 * We will move it back after decryption.
 896	 */
 897	if ((x->props.flags & XFRM_STATE_ESN)) {
 898		struct ip_esp_hdr *esph = skb_push(skb, 4);
 899
 900		*seqhi = esph->spi;
 901		esph->spi = esph->seq_no;
 902		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 903	}
 904}
 905
 906static void esp_input_done_esn(void *data, int err)
 907{
 908	struct sk_buff *skb = data;
 909
 910	esp_input_restore_header(skb);
 911	esp_input_done(data, err);
 912}
 913
 914static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 915{
 916	struct crypto_aead *aead = x->data;
 917	struct aead_request *req;
 918	struct sk_buff *trailer;
 919	int ivlen = crypto_aead_ivsize(aead);
 920	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
 921	int nfrags;
 922	int assoclen;
 923	int seqhilen;
 924	int ret = 0;
 925	void *tmp;
 926	__be32 *seqhi;
 927	u8 *iv;
 928	struct scatterlist *sg;
 929
 930	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
 931		ret = -EINVAL;
 932		goto out;
 933	}
 934
 935	if (elen <= 0) {
 936		ret = -EINVAL;
 937		goto out;
 938	}
 939
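	/* The AAD is the ESP header itself (SPI plus 32-bit sequence
	 * number), extended by the high sequence bits when ESN is on.
	 */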
 940	assoclen = sizeof(struct ip_esp_hdr);
 941	seqhilen = 0;
 942
 943	if (x->props.flags & XFRM_STATE_ESN) {
 944		seqhilen += sizeof(__be32);
 945		assoclen += seqhilen;
 946	}
 947
 948	if (!skb_cloned(skb)) {
 949		if (!skb_is_nonlinear(skb)) {
 950			nfrags = 1;
 951
 952			goto skip_cow;
 953		} else if (!skb_has_frag_list(skb)) {
 954			nfrags = skb_shinfo(skb)->nr_frags;
 955			nfrags++;
 956
 957			goto skip_cow;
 958		}
 959	}
 960
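	/* Cloned or frag-listed skbs can't be decrypted in place;
	 * skb_cow_data() copies them and returns the fragment count.
	 */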
 961	nfrags = skb_cow_data(skb, 0, &trailer);
 962	if (nfrags < 0) {
 963		ret = -EINVAL;
 964		goto out;
 965	}
 966
 967skip_cow:
 968	ret = -ENOMEM;
 969	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
 970	if (!tmp)
 971		goto out;
 972
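	/* Carve the scratch buffer into the ESN save slot, IV, AEAD
	 * request and scatterlist, in that order.
	 */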
 973	ESP_SKB_CB(skb)->tmp = tmp;
 974	seqhi = esp_tmp_extra(tmp);
 975	iv = esp_tmp_iv(aead, tmp, seqhilen);
 976	req = esp_tmp_req(aead, iv);
 977	sg = esp_req_sg(aead, req);
 978
 979	esp_input_set_header(skb, seqhi);
 980
 981	sg_init_table(sg, nfrags);
 982	ret = skb_to_sgvec(skb, sg, 0, skb->len);
 983	if (unlikely(ret < 0)) {
 984		kfree(tmp);
 985		goto out;
 986	}
 987
 988	skb->ip_summed = CHECKSUM_NONE;
 989
 990	if ((x->props.flags & XFRM_STATE_ESN))
 991		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 992	else
 993		aead_request_set_callback(req, 0, esp_input_done, skb);
 994
 995	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
 996	aead_request_set_ad(req, assoclen);
 997
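	/* The IV sits at the front of the ciphertext, so it is part of
	 * the crypt length.  -EINPROGRESS means the cipher completes
	 * asynchronously and the callback finishes the work.
	 */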
 998	ret = crypto_aead_decrypt(req);
 999	if (ret == -EINPROGRESS)
1000		goto out;
1001
1002	if ((x->props.flags & XFRM_STATE_ESN))
1003		esp_input_restore_header(skb);
1004
1005	ret = esp6_input_done2(skb, ret);
1006
1007out:
1008	return ret;
1009}
1010
1011static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1012		    u8 type, u8 code, int offset, __be32 info)
1013{
1014	struct net *net = dev_net(skb->dev);
1015	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
1016	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
1017	struct xfrm_state *x;
1018
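	/* Only PMTU updates and redirects are handled here; other
	 * ICMPv6 errors are left to the generic handlers.
	 */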
1019	if (type != ICMPV6_PKT_TOOBIG &&
1020	    type != NDISC_REDIRECT)
1021		return 0;
1022
1023	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
1024			      esph->spi, IPPROTO_ESP, AF_INET6);
1025	if (!x)
1026		return 0;
1027
1028	if (type == NDISC_REDIRECT)
1029		ip6_redirect(skb, net, skb->dev->ifindex, 0,
1030			     sock_net_uid(net, NULL));
1031	else
1032		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
1033	xfrm_state_put(x);
1034
1035	return 0;
1036}
1037
1038static void esp6_destroy(struct xfrm_state *x)
1039{
1040	struct crypto_aead *aead = x->data;
1041
1042	if (!aead)
1043		return;
1044
1045	crypto_free_aead(aead);
1046}
1047
1048static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack)
1049{
1050	char aead_name[CRYPTO_MAX_ALG_NAME];
1051	struct crypto_aead *aead;
1052	int err;
1053
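	/* Compose the transform name from the IV generator and the
	 * AEAD algorithm, e.g. "seqiv(rfc4106(gcm(aes)))".
	 */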
1054	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
1055		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) {
1056		NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1057		return -ENAMETOOLONG;
1058	}
1059
1060	aead = crypto_alloc_aead(aead_name, 0, 0);
1061	err = PTR_ERR(aead);
1062	if (IS_ERR(aead))
1063		goto error;
1064
1065	x->data = aead;
1066
1067	err = crypto_aead_setkey(aead, x->aead->alg_key,
1068				 (x->aead->alg_key_len + 7) / 8);
1069	if (err)
1070		goto error;
1071
1072	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
1073	if (err)
1074		goto error;
1075
1076	return 0;
1077
1078error:
1079	NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1080	return err;
1081}
1082
1083static int esp_init_authenc(struct xfrm_state *x,
1084			    struct netlink_ext_ack *extack)
1085{
1086	struct crypto_aead *aead;
1087	struct crypto_authenc_key_param *param;
1088	struct rtattr *rta;
1089	char *key;
1090	char *p;
1091	char authenc_name[CRYPTO_MAX_ALG_NAME];
1092	unsigned int keylen;
1093	int err;
1094
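	/* No real AEAD was configured: build one from the separate
	 * cipher and auth algorithms with the authenc() template
	 * (authencesn() for ESN); "digest_null" stands in when no
	 * auth algorithm is given.
	 */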
1095	err = -ENAMETOOLONG;
1096
1097	if ((x->props.flags & XFRM_STATE_ESN)) {
1098		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1099			     "%s%sauthencesn(%s,%s)%s",
1100			     x->geniv ?: "", x->geniv ? "(" : "",
1101			     x->aalg ? x->aalg->alg_name : "digest_null",
1102			     x->ealg->alg_name,
1103			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1104			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1105			goto error;
1106		}
1107	} else {
1108		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
1109			     "%s%sauthenc(%s,%s)%s",
1110			     x->geniv ?: "", x->geniv ? "(" : "",
1111			     x->aalg ? x->aalg->alg_name : "digest_null",
1112			     x->ealg->alg_name,
1113			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) {
1114			NL_SET_ERR_MSG(extack, "Algorithm name is too long");
1115			goto error;
1116		}
1117	}
1118
1119	aead = crypto_alloc_aead(authenc_name, 0, 0);
1120	err = PTR_ERR(aead);
1121	if (IS_ERR(aead)) {
1122		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1123		goto error;
1124	}
1125
1126	x->data = aead;
1127
1128	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
1129		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
1130	err = -ENOMEM;
1131	key = kmalloc(keylen, GFP_KERNEL);
1132	if (!key)
1133		goto error;
1134
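	/* authenc() takes its key as an rtattr carrying the encryption
	 * key length, followed by the raw auth key and then the
	 * encryption key.
	 */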
1135	p = key;
1136	rta = (void *)p;
1137	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
1138	rta->rta_len = RTA_LENGTH(sizeof(*param));
1139	param = RTA_DATA(rta);
1140	p += RTA_SPACE(sizeof(*param));
1141
1142	if (x->aalg) {
1143		struct xfrm_algo_desc *aalg_desc;
1144
1145		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
1146		p += (x->aalg->alg_key_len + 7) / 8;
1147
1148		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
1149		BUG_ON(!aalg_desc);
1150
1151		err = -EINVAL;
1152		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
1153		    crypto_aead_authsize(aead)) {
1154			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1155			goto free_key;
1156		}
1157
1158		err = crypto_aead_setauthsize(
1159			aead, x->aalg->alg_trunc_len / 8);
1160		if (err) {
1161			NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
1162			goto free_key;
1163		}
1164	}
1165
1166	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
1167	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
1168
1169	err = crypto_aead_setkey(aead, key, keylen);
1170
1171free_key:
1172	kfree(key);
1173
1174error:
1175	return err;
1176}
1177
1178static int esp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
1179{
1180	struct crypto_aead *aead;
1181	u32 align;
1182	int err;
1183
1184	x->data = NULL;
1185
1186	if (x->aead) {
1187		err = esp_init_aead(x, extack);
1188	} else if (x->ealg) {
1189		err = esp_init_authenc(x, extack);
1190	} else {
1191		NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided");
1192		err = -EINVAL;
1193	}
1194
1195	if (err)
1196		goto error;
1197
1198	aead = x->data;
1199
1200	x->props.header_len = sizeof(struct ip_esp_hdr) +
1201			      crypto_aead_ivsize(aead);
1202	switch (x->props.mode) {
1203	case XFRM_MODE_BEET:
1204		if (x->sel.family != AF_INET6)
1205			x->props.header_len += IPV4_BEET_PHMAXLEN +
1206					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
1207		break;
1208	default:
1209	case XFRM_MODE_TRANSPORT:
1210		break;
1211	case XFRM_MODE_TUNNEL:
1212		x->props.header_len += sizeof(struct ipv6hdr);
1213		break;
1214	}
1215
1216	if (x->encap) {
1217		struct xfrm_encap_tmpl *encap = x->encap;
1218
1219		switch (encap->encap_type) {
1220		default:
1221			NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP");
1222			err = -EINVAL;
1223			goto error;
1224		case UDP_ENCAP_ESPINUDP:
1225			x->props.header_len += sizeof(struct udphdr);
1226			break;
1227#ifdef CONFIG_INET6_ESPINTCP
1228		case TCP_ENCAP_ESPINTCP:
1229			/* only the 2-byte length field; the TCP
1230			 * encapsulation itself is done by the socket
1231			 */
1232			x->props.header_len += 2;
1233			break;
1234#endif
1235		}
1236	}
1237
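	/* Worst-case trailer: block size minus one bytes of padding,
	 * the pad-length and next-header bytes, plus the ICV.
	 */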
1238	align = ALIGN(crypto_aead_blocksize(aead), 4);
1239	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);
1240
1241error:
1242	return err;
1243}
1244
1245static int esp6_rcv_cb(struct sk_buff *skb, int err)
1246{
1247	return 0;
1248}
1249
1250static const struct xfrm_type esp6_type = {
1251	.owner		= THIS_MODULE,
1252	.proto		= IPPROTO_ESP,
1253	.flags		= XFRM_TYPE_REPLAY_PROT,
1254	.init_state	= esp6_init_state,
1255	.destructor	= esp6_destroy,
1256	.input		= esp6_input,
1257	.output		= esp6_output,
1258};
1259
1260static struct xfrm6_protocol esp6_protocol = {
1261	.handler	=	xfrm6_rcv,
1262	.input_handler	=	xfrm_input,
1263	.cb_handler	=	esp6_rcv_cb,
1264	.err_handler	=	esp6_err,
1265	.priority	=	0,
1266};
1267
1268static int __init esp6_init(void)
1269{
1270	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
1271		pr_info("%s: can't add xfrm type\n", __func__);
1272		return -EAGAIN;
1273	}
1274	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
1275		pr_info("%s: can't add protocol\n", __func__);
1276		xfrm_unregister_type(&esp6_type, AF_INET6);
1277		return -EAGAIN;
1278	}
1279
1280	return 0;
1281}
1282
1283static void __exit esp6_fini(void)
1284{
1285	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
1286		pr_info("%s: can't remove protocol\n", __func__);
1287	xfrm_unregister_type(&esp6_type, AF_INET6);
1288}
1289
1290module_init(esp6_init);
1291module_exit(esp6_fini);
1292
1293MODULE_DESCRIPTION("IPv6 ESP transformation helpers");
1294MODULE_LICENSE("GPL");
1295MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);