v6.2
  1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
  2 *
  3 * This software is available to you under a choice of one of two
  4 * licenses.  You may choose to be licensed under the terms of the GNU
  5 * General Public License (GPL) Version 2, available from the file
  6 * COPYING in the main directory of this source tree, or the
  7 * OpenIB.org BSD license below:
  8 *
  9 *     Redistribution and use in source and binary forms, with or
 10 *     without modification, are permitted provided that the following
 11 *     conditions are met:
 12 *
 13 *      - Redistributions of source code must retain the above
 14 *        copyright notice, this list of conditions and the following
 15 *        disclaimer.
 16 *
 17 *      - Redistributions in binary form must reproduce the above
 18 *        copyright notice, this list of conditions and the following
 19 *        disclaimer in the documentation and/or other materials
 20 *        provided with the distribution.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 29 * SOFTWARE.
 30 */
 31
 32#include <net/tls.h>
 33#include <crypto/aead.h>
 34#include <crypto/scatterwalk.h>
 35#include <net/ip6_checksum.h>
 36
 37#include "tls.h"
 38
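/* Build a scatterlist entry that picks up at the current position of a
 * scatter_walk and chain it to the remainder of the walk's scatterlist,
 * so a partially consumed buffer can feed the next AEAD operation.
 */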
 39static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
 40{
 41	struct scatterlist *src = walk->sg;
 42	int diff = walk->offset - src->offset;
 43
 44	sg_set_page(sg, sg_page(src),
 45		    src->length - diff, walk->offset);
 46
 47	scatterwalk_crypto_chain(sg, sg_next(src), 2);
 48}
 49
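/* Software-encrypt a single TLS record: read the record header and explicit
 * IV from the input walk (copying them through to the output), rebuild the
 * AAD and nonce, and run the AEAD over the record payload. *in_len is
 * reduced by the amount of input consumed.
 */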
 50static int tls_enc_record(struct aead_request *aead_req,
 51			  struct crypto_aead *aead, char *aad,
 52			  char *iv, __be64 rcd_sn,
 53			  struct scatter_walk *in,
 54			  struct scatter_walk *out, int *in_len,
 55			  struct tls_prot_info *prot)
 56{
 57	unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
 58	const struct tls_cipher_size_desc *cipher_sz;
 59	struct scatterlist sg_in[3];
 60	struct scatterlist sg_out[3];
 61	unsigned int buf_size;
 62	u16 len;
 63	int rc;
 64
 65	switch (prot->cipher_type) {
 66	case TLS_CIPHER_AES_GCM_128:
 67	case TLS_CIPHER_AES_GCM_256:
 68		break;
 69	default:
 70		return -EINVAL;
 71	}
 72	cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
 73
 74	buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
 75	len = min_t(int, *in_len, buf_size);
 76
 77	scatterwalk_copychunks(buf, in, len, 0);
 78	scatterwalk_copychunks(buf, out, len, 1);
 79
 80	*in_len -= len;
 81	if (!*in_len)
 82		return 0;
 83
 84	scatterwalk_pagedone(in, 0, 1);
 85	scatterwalk_pagedone(out, 1, 1);
 86
 87	len = buf[4] | (buf[3] << 8);
 88	len -= cipher_sz->iv;
 89
 90	tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
 91
 92	memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
 93
 94	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 95	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
 96	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
 97	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
 98	chain_to_walk(sg_in + 1, in);
 99	chain_to_walk(sg_out + 1, out);
100
101	*in_len -= len;
102	if (*in_len < 0) {
103		*in_len += cipher_sz->tag;
104		/* the input buffer doesn't contain the entire record.
105		 * trim len accordingly. The resulting authentication tag
106		 * will contain garbage, but we don't care, so we won't
107		 * include any of it in the output skb
108		 * Note that we assume the output buffer length
109	 * is larger than input buffer length + tag size
110		 */
111		if (*in_len < 0)
112			len += *in_len;
113
114		*in_len = 0;
115	}
116
117	if (*in_len) {
118		scatterwalk_copychunks(NULL, in, len, 2);
119		scatterwalk_pagedone(in, 0, 1);
120		scatterwalk_copychunks(NULL, out, len, 2);
121		scatterwalk_pagedone(out, 1, 1);
122	}
123
124	len -= cipher_sz->tag;
125	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
126
127	rc = crypto_aead_encrypt(aead_req);
128
129	return rc;
130}
131
132static void tls_init_aead_request(struct aead_request *aead_req,
133				  struct crypto_aead *aead)
134{
135	aead_request_set_tfm(aead_req, aead);
136	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
137}
138
139static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
140						   gfp_t flags)
141{
142	unsigned int req_size = sizeof(struct aead_request) +
143		crypto_aead_reqsize(aead);
144	struct aead_request *aead_req;
145
146	aead_req = kzalloc(req_size, flags);
147	if (aead_req)
148		tls_init_aead_request(aead_req, aead);
149	return aead_req;
150}
151
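/* Encrypt every TLS record covered by the input scatterlist into the output
 * scatterlist, bumping the record sequence number for each record.
 */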
152static int tls_enc_records(struct aead_request *aead_req,
153			   struct crypto_aead *aead, struct scatterlist *sg_in,
154			   struct scatterlist *sg_out, char *aad, char *iv,
155			   u64 rcd_sn, int len, struct tls_prot_info *prot)
156{
157	struct scatter_walk out, in;
158	int rc;
159
160	scatterwalk_start(&in, sg_in);
161	scatterwalk_start(&out, sg_out);
162
163	do {
164		rc = tls_enc_record(aead_req, aead, aad, iv,
165				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
166		rcd_sn++;
167
168	} while (rc == 0 && len);
169
170	scatterwalk_done(&in, 0, 0);
171	scatterwalk_done(&out, 1, 0);
172
173	return rc;
174}
175
176/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
177 * might have been changed by NAT.
178 */
179static void update_chksum(struct sk_buff *skb, int headln)
180{
181	struct tcphdr *th = tcp_hdr(skb);
182	int datalen = skb->len - headln;
183	const struct ipv6hdr *ipv6h;
184	const struct iphdr *iph;
185
186	/* We only changed the payload so if we are using partial we don't
187	 * need to update anything.
188	 */
189	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
190		return;
191
192	skb->ip_summed = CHECKSUM_PARTIAL;
193	skb->csum_start = skb_transport_header(skb) - skb->head;
194	skb->csum_offset = offsetof(struct tcphdr, check);
195
196	if (skb->sk->sk_family == AF_INET6) {
197		ipv6h = ipv6_hdr(skb);
198		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
199					     datalen, IPPROTO_TCP, 0);
200	} else {
201		iph = ip_hdr(skb);
202		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
203					       IPPROTO_TCP, 0);
204	}
205}
206
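/* Finish the fallback skb: copy the headers and socket ownership from the
 * original skb, fix up the TCP checksum and charge any truesize delta to
 * the socket's write-memory accounting.
 */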
207static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
208{
209	struct sock *sk = skb->sk;
210	int delta;
211
212	skb_copy_header(nskb, skb);
213
214	skb_put(nskb, skb->len);
215	memcpy(nskb->data, skb->data, headln);
216
217	nskb->destructor = skb->destructor;
218	nskb->sk = sk;
219	skb->destructor = NULL;
220	skb->sk = NULL;
221
222	update_chksum(nskb, headln);
223
224	/* sock_efree means the skb must have gone through skb_orphan_partial() */
225	if (nskb->destructor == sock_efree)
226		return;
227
228	delta = nskb->truesize - skb->truesize;
229	if (likely(delta < 0))
230		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
231	else if (delta)
232		refcount_add(delta, &sk->sk_wmem_alloc);
233}
234
235/* This function may be called after the user socket is already
236 * closed so make sure we don't use anything freed during
237 * tls_sk_proto_close here
238 */
239
240static int fill_sg_in(struct scatterlist *sg_in,
241		      struct sk_buff *skb,
242		      struct tls_offload_context_tx *ctx,
243		      u64 *rcd_sn,
244		      s32 *sync_size,
245		      int *resync_sgs)
246{
247	int tcp_payload_offset = skb_tcp_all_headers(skb);
248	int payload_len = skb->len - tcp_payload_offset;
249	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
250	struct tls_record_info *record;
251	unsigned long flags;
252	int remaining;
253	int i;
254
255	spin_lock_irqsave(&ctx->lock, flags);
256	record = tls_get_record(ctx, tcp_seq, rcd_sn);
257	if (!record) {
258		spin_unlock_irqrestore(&ctx->lock, flags);
259		return -EINVAL;
260	}
261
262	*sync_size = tcp_seq - tls_record_start_seq(record);
263	if (*sync_size < 0) {
264		int is_start_marker = tls_record_is_start_marker(record);
265
266		spin_unlock_irqrestore(&ctx->lock, flags);
267		/* This should only occur if the relevant record was
268		 * already acked. In that case it should be ok
269		 * to drop the packet and avoid retransmission.
270		 *
271		 * There is a corner case where the packet contains
272		 * both an acked and a non-acked record.
273		 * We currently don't handle that case and rely
274	 * on TCP to retransmit a packet that doesn't contain
275		 * already acked payload.
276		 */
277		if (!is_start_marker)
278			*sync_size = 0;
279		return -EINVAL;
280	}
281
282	remaining = *sync_size;
283	for (i = 0; remaining > 0; i++) {
284		skb_frag_t *frag = &record->frags[i];
285
286		__skb_frag_ref(frag);
287		sg_set_page(sg_in + i, skb_frag_page(frag),
288			    skb_frag_size(frag), skb_frag_off(frag));
289
290		remaining -= skb_frag_size(frag);
291
292		if (remaining < 0)
293			sg_in[i].length += remaining;
294	}
295	*resync_sgs = i;
296
297	spin_unlock_irqrestore(&ctx->lock, flags);
298	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
299		return -EINVAL;
300
301	return 0;
302}
303
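/* Lay out the encryption destination: a scratch area for the already-sent
 * part of a resynced record, the payload region of the new skb, and room
 * for the authentication tag.
 */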
304static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
305			struct tls_context *tls_ctx,
306			struct sk_buff *nskb,
307			int tcp_payload_offset,
308			int payload_len,
309			int sync_size,
310			void *dummy_buf)
311{
312	const struct tls_cipher_size_desc *cipher_sz =
313		&tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
314
315	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
316	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
317	/* Add room for authentication tag produced by crypto */
318	dummy_buf += sync_size;
319	sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
320}
321
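/* Allocate the output skb plus the scratch buffers (IV, AAD and dummy
 * output area) needed for software encryption, encrypt the records and
 * finalize the new skb. Returns NULL on failure.
 */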
322static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
323				   struct scatterlist sg_out[3],
324				   struct scatterlist *sg_in,
325				   struct sk_buff *skb,
326				   s32 sync_size, u64 rcd_sn)
327{
328	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
329	int tcp_payload_offset = skb_tcp_all_headers(skb);
330	int payload_len = skb->len - tcp_payload_offset;
331	const struct tls_cipher_size_desc *cipher_sz;
332	void *buf, *iv, *aad, *dummy_buf, *salt;
333	struct aead_request *aead_req;
334	struct sk_buff *nskb = NULL;
335	int buf_len;
336
337	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
338	if (!aead_req)
339		return NULL;
340
341	switch (tls_ctx->crypto_send.info.cipher_type) {
342	case TLS_CIPHER_AES_GCM_128:
343		salt = tls_ctx->crypto_send.aes_gcm_128.salt;
344		break;
345	case TLS_CIPHER_AES_GCM_256:
346		salt = tls_ctx->crypto_send.aes_gcm_256.salt;
347		break;
348	default:
349		goto free_req;
350	}
351	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
352	buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
353		  sync_size + cipher_sz->tag;
354	buf = kmalloc(buf_len, GFP_ATOMIC);
355	if (!buf)
356		goto free_req;
357
358	iv = buf;
359	memcpy(iv, salt, cipher_sz->salt);
360	aad = buf + cipher_sz->salt + cipher_sz->iv;
361	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
362
363	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
364	if (!nskb)
365		goto free_buf;
366
367	skb_reserve(nskb, skb_headroom(skb));
368
369	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
370		    payload_len, sync_size, dummy_buf);
371
372	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
373			    rcd_sn, sync_size + payload_len,
374			    &tls_ctx->prot_info) < 0)
375		goto free_nskb;
376
377	complete_skb(nskb, skb, tcp_payload_offset);
378
379	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
380	 * nskb->prev will point to the skb itself
381	 */
382	nskb->prev = nskb;
383
384free_buf:
385	kfree(buf);
386free_req:
387	kfree(aead_req);
388	return nskb;
389free_nskb:
390	kfree_skb(nskb);
391	nskb = NULL;
392	goto free_buf;
393}
394
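/* Software fallback for a TLS device-offload socket: gather the record data
 * backing this skb (including already-transmitted bytes needed to restart at
 * a record boundary), encrypt it in software and return a new skb that can
 * be sent without hardware crypto. The original skb is consumed.
 */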
395static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
396{
397	int tcp_payload_offset = skb_tcp_all_headers(skb);
398	struct tls_context *tls_ctx = tls_get_ctx(sk);
399	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
400	int payload_len = skb->len - tcp_payload_offset;
401	struct scatterlist *sg_in, sg_out[3];
402	struct sk_buff *nskb = NULL;
403	int sg_in_max_elements;
404	int resync_sgs = 0;
405	s32 sync_size = 0;
406	u64 rcd_sn;
407
408	/* worst case is:
409	 * MAX_SKB_FRAGS in tls_record_info
410	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
411	 */
412	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
413
414	if (!payload_len)
415		return skb;
416
417	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
418	if (!sg_in)
419		goto free_orig;
420
421	sg_init_table(sg_in, sg_in_max_elements);
422	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
423
424	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
425		/* bypass packets before kernel TLS socket option was set */
426		if (sync_size < 0 && payload_len <= -sync_size)
427			nskb = skb_get(skb);
428		goto put_sg;
429	}
430
431	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
432
433put_sg:
434	while (resync_sgs)
435		put_page(sg_page(&sg_in[--resync_sgs]));
436	kfree(sg_in);
437free_orig:
438	if (nskb)
439		consume_skb(skb);
440	else
441		kfree_skb(skb);
442	return nskb;
443}
444
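/* TX validation hook for offloaded TLS sockets: packets leaving through the
 * device that owns the offload (or a bonding master above it) are passed
 * through unchanged; anything else is encrypted in software first.
 */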
445struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
446				      struct net_device *dev,
447				      struct sk_buff *skb)
448{
449	if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
450	    netif_is_bond_master(dev))
451		return skb;
452
453	return tls_sw_fallback(sk, skb);
454}
455EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
456
457struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
458					 struct net_device *dev,
459					 struct sk_buff *skb)
460{
461	return tls_sw_fallback(sk, skb);
462}
463
464struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
465{
466	return tls_sw_fallback(skb->sk, skb);
467}
468EXPORT_SYMBOL_GPL(tls_encrypt_skb);
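/*
 * Illustrative sketch, not part of the kernel source above: a driver whose
 * hardware cannot encrypt a particular TLS packet may call tls_encrypt_skb()
 * from its TX path as a software fallback. The call consumes @skb and returns
 * an encrypted replacement, or NULL on failure (the original skb is freed).
 * foo_hw_can_offload() and foo_hw_queue_xmit() are hypothetical placeholder
 * helpers, not real kernel APIs.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk) &&
	    !foo_hw_can_offload(skb)) {
		skb = tls_encrypt_skb(skb);
		if (!skb)
			return NETDEV_TX_OK;
	}
	return foo_hw_queue_xmit(skb, dev);
}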
469
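/* Allocate and key the gcm(aes) transform used by the software fallback
 * path of a TX offload context.
 */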
470int tls_sw_fallback_init(struct sock *sk,
471			 struct tls_offload_context_tx *offload_ctx,
472			 struct tls_crypto_info *crypto_info)
473{
474	const struct tls_cipher_size_desc *cipher_sz;
475	const u8 *key;
476	int rc;
477
478	offload_ctx->aead_send =
479	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
480	if (IS_ERR(offload_ctx->aead_send)) {
481		rc = PTR_ERR(offload_ctx->aead_send);
482		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
483		offload_ctx->aead_send = NULL;
484		goto err_out;
485	}
486
487	switch (crypto_info->cipher_type) {
488	case TLS_CIPHER_AES_GCM_128:
489		key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
490		break;
491	case TLS_CIPHER_AES_GCM_256:
492		key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
493		break;
494	default:
495		rc = -EINVAL;
496		goto free_aead;
497	}
498	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
499
500	rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
501	if (rc)
502		goto free_aead;
503
504	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
505	if (rc)
506		goto free_aead;
507
508	return 0;
509free_aead:
510	crypto_free_aead(offload_ctx->aead_send);
511err_out:
512	return rc;
513}
v5.9
  1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
  2 *
  3 * This software is available to you under a choice of one of two
  4 * licenses.  You may choose to be licensed under the terms of the GNU
  5 * General Public License (GPL) Version 2, available from the file
  6 * COPYING in the main directory of this source tree, or the
  7 * OpenIB.org BSD license below:
  8 *
  9 *     Redistribution and use in source and binary forms, with or
 10 *     without modification, are permitted provided that the following
 11 *     conditions are met:
 12 *
 13 *      - Redistributions of source code must retain the above
 14 *        copyright notice, this list of conditions and the following
 15 *        disclaimer.
 16 *
 17 *      - Redistributions in binary form must reproduce the above
 18 *        copyright notice, this list of conditions and the following
 19 *        disclaimer in the documentation and/or other materials
 20 *        provided with the distribution.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 29 * SOFTWARE.
 30 */
 31
 32#include <net/tls.h>
 33#include <crypto/aead.h>
 34#include <crypto/scatterwalk.h>
 35#include <net/ip6_checksum.h>
 36
 37static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
 38{
 39	struct scatterlist *src = walk->sg;
 40	int diff = walk->offset - src->offset;
 41
 42	sg_set_page(sg, sg_page(src),
 43		    src->length - diff, walk->offset);
 44
 45	scatterwalk_crypto_chain(sg, sg_next(src), 2);
 46}
 47
 48static int tls_enc_record(struct aead_request *aead_req,
 49			  struct crypto_aead *aead, char *aad,
 50			  char *iv, __be64 rcd_sn,
 51			  struct scatter_walk *in,
 52			  struct scatter_walk *out, int *in_len)
 53{
 54	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
 55	struct scatterlist sg_in[3];
 56	struct scatterlist sg_out[3];
 57	u16 len;
 58	int rc;
 59
 60	len = min_t(int, *in_len, ARRAY_SIZE(buf));
 61
 62	scatterwalk_copychunks(buf, in, len, 0);
 63	scatterwalk_copychunks(buf, out, len, 1);
 64
 65	*in_len -= len;
 66	if (!*in_len)
 67		return 0;
 68
 69	scatterwalk_pagedone(in, 0, 1);
 70	scatterwalk_pagedone(out, 1, 1);
 71
 72	len = buf[4] | (buf[3] << 8);
 73	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
 74
 75	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
 76		(char *)&rcd_sn, sizeof(rcd_sn), buf[0],
 77		TLS_1_2_VERSION);
 78
 79	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
 80	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
 81
 82	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 83	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
 84	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
 85	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
 86	chain_to_walk(sg_in + 1, in);
 87	chain_to_walk(sg_out + 1, out);
 88
 89	*in_len -= len;
 90	if (*in_len < 0) {
 91		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 92		/* the input buffer doesn't contain the entire record.
 93		 * trim len accordingly. The resulting authentication tag
 94		 * will contain garbage, but we don't care, so we won't
 95		 * include any of it in the output skb
 96		 * Note that we assume the output buffer length
 97	 * is larger than input buffer length + tag size
 98		 */
 99		if (*in_len < 0)
100			len += *in_len;
101
102		*in_len = 0;
103	}
104
105	if (*in_len) {
106		scatterwalk_copychunks(NULL, in, len, 2);
107		scatterwalk_pagedone(in, 0, 1);
108		scatterwalk_copychunks(NULL, out, len, 2);
109		scatterwalk_pagedone(out, 1, 1);
110	}
111
112	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
113	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
114
115	rc = crypto_aead_encrypt(aead_req);
116
117	return rc;
118}
119
120static void tls_init_aead_request(struct aead_request *aead_req,
121				  struct crypto_aead *aead)
122{
123	aead_request_set_tfm(aead_req, aead);
124	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
125}
126
127static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
128						   gfp_t flags)
129{
130	unsigned int req_size = sizeof(struct aead_request) +
131		crypto_aead_reqsize(aead);
132	struct aead_request *aead_req;
133
134	aead_req = kzalloc(req_size, flags);
135	if (aead_req)
136		tls_init_aead_request(aead_req, aead);
137	return aead_req;
138}
139
140static int tls_enc_records(struct aead_request *aead_req,
141			   struct crypto_aead *aead, struct scatterlist *sg_in,
142			   struct scatterlist *sg_out, char *aad, char *iv,
143			   u64 rcd_sn, int len)
144{
145	struct scatter_walk out, in;
146	int rc;
147
148	scatterwalk_start(&in, sg_in);
149	scatterwalk_start(&out, sg_out);
150
151	do {
152		rc = tls_enc_record(aead_req, aead, aad, iv,
153				    cpu_to_be64(rcd_sn), &in, &out, &len);
154		rcd_sn++;
155
156	} while (rc == 0 && len);
157
158	scatterwalk_done(&in, 0, 0);
159	scatterwalk_done(&out, 1, 0);
160
161	return rc;
162}
163
164/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
165 * might have been changed by NAT.
166 */
167static void update_chksum(struct sk_buff *skb, int headln)
168{
169	struct tcphdr *th = tcp_hdr(skb);
170	int datalen = skb->len - headln;
171	const struct ipv6hdr *ipv6h;
172	const struct iphdr *iph;
173
174	/* We only changed the payload so if we are using partial we don't
175	 * need to update anything.
176	 */
177	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
178		return;
179
180	skb->ip_summed = CHECKSUM_PARTIAL;
181	skb->csum_start = skb_transport_header(skb) - skb->head;
182	skb->csum_offset = offsetof(struct tcphdr, check);
183
184	if (skb->sk->sk_family == AF_INET6) {
185		ipv6h = ipv6_hdr(skb);
186		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
187					     datalen, IPPROTO_TCP, 0);
188	} else {
189		iph = ip_hdr(skb);
190		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
191					       IPPROTO_TCP, 0);
192	}
193}
194
195static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
196{
197	struct sock *sk = skb->sk;
198	int delta;
199
200	skb_copy_header(nskb, skb);
201
202	skb_put(nskb, skb->len);
203	memcpy(nskb->data, skb->data, headln);
204
205	nskb->destructor = skb->destructor;
206	nskb->sk = sk;
207	skb->destructor = NULL;
208	skb->sk = NULL;
209
210	update_chksum(nskb, headln);
211
212	/* sock_efree means the skb must have gone through skb_orphan_partial() */
213	if (nskb->destructor == sock_efree)
214		return;
215
216	delta = nskb->truesize - skb->truesize;
217	if (likely(delta < 0))
218		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
219	else if (delta)
220		refcount_add(delta, &sk->sk_wmem_alloc);
221}
222
223/* This function may be called after the user socket is already
224 * closed so make sure we don't use anything freed during
225 * tls_sk_proto_close here
226 */
227
228static int fill_sg_in(struct scatterlist *sg_in,
229		      struct sk_buff *skb,
230		      struct tls_offload_context_tx *ctx,
231		      u64 *rcd_sn,
232		      s32 *sync_size,
233		      int *resync_sgs)
234{
235	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
236	int payload_len = skb->len - tcp_payload_offset;
237	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
238	struct tls_record_info *record;
239	unsigned long flags;
240	int remaining;
241	int i;
242
243	spin_lock_irqsave(&ctx->lock, flags);
244	record = tls_get_record(ctx, tcp_seq, rcd_sn);
245	if (!record) {
246		spin_unlock_irqrestore(&ctx->lock, flags);
247		return -EINVAL;
248	}
249
250	*sync_size = tcp_seq - tls_record_start_seq(record);
251	if (*sync_size < 0) {
252		int is_start_marker = tls_record_is_start_marker(record);
253
254		spin_unlock_irqrestore(&ctx->lock, flags);
255		/* This should only occur if the relevant record was
256		 * already acked. In that case it should be ok
257		 * to drop the packet and avoid retransmission.
258		 *
259		 * There is a corner case where the packet contains
260		 * both an acked and a non-acked record.
261		 * We currently don't handle that case and rely
262	 * on TCP to retransmit a packet that doesn't contain
263		 * already acked payload.
264		 */
265		if (!is_start_marker)
266			*sync_size = 0;
267		return -EINVAL;
268	}
269
270	remaining = *sync_size;
271	for (i = 0; remaining > 0; i++) {
272		skb_frag_t *frag = &record->frags[i];
273
274		__skb_frag_ref(frag);
275		sg_set_page(sg_in + i, skb_frag_page(frag),
276			    skb_frag_size(frag), skb_frag_off(frag));
277
278		remaining -= skb_frag_size(frag);
279
280		if (remaining < 0)
281			sg_in[i].length += remaining;
282	}
283	*resync_sgs = i;
284
285	spin_unlock_irqrestore(&ctx->lock, flags);
286	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
287		return -EINVAL;
288
289	return 0;
290}
291
292static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
293			struct tls_context *tls_ctx,
294			struct sk_buff *nskb,
295			int tcp_payload_offset,
296			int payload_len,
297			int sync_size,
298			void *dummy_buf)
299{
300	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
301	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
302	/* Add room for authentication tag produced by crypto */
303	dummy_buf += sync_size;
304	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
305}
306
307static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
308				   struct scatterlist sg_out[3],
309				   struct scatterlist *sg_in,
310				   struct sk_buff *skb,
311				   s32 sync_size, u64 rcd_sn)
312{
313	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
314	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
315	int payload_len = skb->len - tcp_payload_offset;
316	void *buf, *iv, *aad, *dummy_buf;
317	struct aead_request *aead_req;
318	struct sk_buff *nskb = NULL;
319	int buf_len;
320
321	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
322	if (!aead_req)
323		return NULL;
324
325	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
326		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
327		  TLS_AAD_SPACE_SIZE +
328		  sync_size +
329		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
330	buf = kmalloc(buf_len, GFP_ATOMIC);
331	if (!buf)
332		goto free_req;
333
334	iv = buf;
335	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
336	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
337	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
338	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
339	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
340
341	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
342	if (!nskb)
343		goto free_buf;
344
345	skb_reserve(nskb, skb_headroom(skb));
346
347	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
348		    payload_len, sync_size, dummy_buf);
349
350	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
351			    rcd_sn, sync_size + payload_len) < 0)
352		goto free_nskb;
353
354	complete_skb(nskb, skb, tcp_payload_offset);
355
356	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
357	 * nskb->prev will point to the skb itself
358	 */
359	nskb->prev = nskb;
360
361free_buf:
362	kfree(buf);
363free_req:
364	kfree(aead_req);
365	return nskb;
366free_nskb:
367	kfree_skb(nskb);
368	nskb = NULL;
369	goto free_buf;
370}
371
372static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
373{
374	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
375	struct tls_context *tls_ctx = tls_get_ctx(sk);
376	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
377	int payload_len = skb->len - tcp_payload_offset;
378	struct scatterlist *sg_in, sg_out[3];
379	struct sk_buff *nskb = NULL;
380	int sg_in_max_elements;
381	int resync_sgs = 0;
382	s32 sync_size = 0;
383	u64 rcd_sn;
384
385	/* worst case is:
386	 * MAX_SKB_FRAGS in tls_record_info
387	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
388	 */
389	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
390
391	if (!payload_len)
392		return skb;
393
394	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
395	if (!sg_in)
396		goto free_orig;
397
398	sg_init_table(sg_in, sg_in_max_elements);
399	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
400
401	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
402		/* bypass packets before kernel TLS socket option was set */
403		if (sync_size < 0 && payload_len <= -sync_size)
404			nskb = skb_get(skb);
405		goto put_sg;
406	}
407
408	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
409
410put_sg:
411	while (resync_sgs)
412		put_page(sg_page(&sg_in[--resync_sgs]));
413	kfree(sg_in);
414free_orig:
415	if (nskb)
416		consume_skb(skb);
417	else
418		kfree_skb(skb);
419	return nskb;
420}
421
422struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
423				      struct net_device *dev,
424				      struct sk_buff *skb)
425{
426	if (dev == tls_get_ctx(sk)->netdev)
427		return skb;
428
429	return tls_sw_fallback(sk, skb);
430}
431EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
432
433struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
434{
435	return tls_sw_fallback(skb->sk, skb);
436}
437EXPORT_SYMBOL_GPL(tls_encrypt_skb);
438
439int tls_sw_fallback_init(struct sock *sk,
440			 struct tls_offload_context_tx *offload_ctx,
441			 struct tls_crypto_info *crypto_info)
442{
443	const u8 *key;
444	int rc;
445
446	offload_ctx->aead_send =
447	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
448	if (IS_ERR(offload_ctx->aead_send)) {
449		rc = PTR_ERR(offload_ctx->aead_send);
450		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
451		offload_ctx->aead_send = NULL;
452		goto err_out;
453	}
454
455	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
456
457	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
458				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
459	if (rc)
460		goto free_aead;
461
462	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
463				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
464	if (rc)
465		goto free_aead;
466
467	return 0;
468free_aead:
469	crypto_free_aead(offload_ctx->aead_send);
470err_out:
471	return rc;
472}