net/tls/tls_device_fallback.c (v5.14.15)
  1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
  2 *
  3 * This software is available to you under a choice of one of two
  4 * licenses.  You may choose to be licensed under the terms of the GNU
  5 * General Public License (GPL) Version 2, available from the file
  6 * COPYING in the main directory of this source tree, or the
  7 * OpenIB.org BSD license below:
  8 *
  9 *     Redistribution and use in source and binary forms, with or
 10 *     without modification, are permitted provided that the following
 11 *     conditions are met:
 12 *
 13 *      - Redistributions of source code must retain the above
 14 *        copyright notice, this list of conditions and the following
 15 *        disclaimer.
 16 *
 17 *      - Redistributions in binary form must reproduce the above
 18 *        copyright notice, this list of conditions and the following
 19 *        disclaimer in the documentation and/or other materials
 20 *        provided with the distribution.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 29 * SOFTWARE.
 30 */
 31
 32#include <net/tls.h>
 33#include <crypto/aead.h>
 34#include <crypto/scatterwalk.h>
 35#include <net/ip6_checksum.h>
 36
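/* Software fallback for TLS TX device offload (tls_device_fallback.c):
 * when an skb from an offload-enabled socket has to leave through a path
 * the offload NIC will not encrypt (see tls_validate_xmit_skb() at the
 * bottom of this file), the affected records are re-encrypted here in
 * software before transmission.
 */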
 37static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
 38{
 39	struct scatterlist *src = walk->sg;
 40	int diff = walk->offset - src->offset;
 41
 42	sg_set_page(sg, sg_page(src),
 43		    src->length - diff, walk->offset);
 44
 45	scatterwalk_crypto_chain(sg, sg_next(src), 2);
 46}
 47
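/* Re-encrypt a single TLS record.  The 5-byte record header and the
 * explicit IV are copied from the input walk to the output walk as-is,
 * the record length is parsed out of buf[3..4], and the AAD and
 * per-record IV are rebuilt before running the AEAD over the payload.
 * *in_len is decremented as the walks advance; if the input ends in the
 * middle of a record, len is trimmed and the resulting (garbage)
 * authentication tag is simply never copied into the output skb.
 */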
 48static int tls_enc_record(struct aead_request *aead_req,
 49			  struct crypto_aead *aead, char *aad,
 50			  char *iv, __be64 rcd_sn,
 51			  struct scatter_walk *in,
 52			  struct scatter_walk *out, int *in_len,
 53			  struct tls_prot_info *prot)
 54{
 55	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
 56	struct scatterlist sg_in[3];
 57	struct scatterlist sg_out[3];
 58	u16 len;
 59	int rc;
 60
 61	len = min_t(int, *in_len, ARRAY_SIZE(buf));
 62
 63	scatterwalk_copychunks(buf, in, len, 0);
 64	scatterwalk_copychunks(buf, out, len, 1);
 65
 66	*in_len -= len;
 67	if (!*in_len)
 68		return 0;
 69
 70	scatterwalk_pagedone(in, 0, 1);
 71	scatterwalk_pagedone(out, 1, 1);
 72
 73	len = buf[4] | (buf[3] << 8);
 74	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;
 75
 76	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
 77		(char *)&rcd_sn, buf[0], prot);
 78
 79	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
 80	       TLS_CIPHER_AES_GCM_128_IV_SIZE);
 81
 82	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 83	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
 84	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
 85	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
 86	chain_to_walk(sg_in + 1, in);
 87	chain_to_walk(sg_out + 1, out);
 88
 89	*in_len -= len;
 90	if (*in_len < 0) {
 91		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 92		/* the input buffer doesn't contain the entire record.
 93		 * trim len accordingly. The resulting authentication tag
 94		 * will contain garbage, but we don't care, so we won't
 95		 * include any of it in the output skb
 96		 * Note that we assume the output buffer length
 97		 * is larger than input buffer length + tag size
 98		 */
 99		if (*in_len < 0)
100			len += *in_len;
101
102		*in_len = 0;
103	}
104
105	if (*in_len) {
106		scatterwalk_copychunks(NULL, in, len, 2);
107		scatterwalk_pagedone(in, 0, 1);
108		scatterwalk_copychunks(NULL, out, len, 2);
109		scatterwalk_pagedone(out, 1, 1);
110	}
111
112	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
113	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
114
115	rc = crypto_aead_encrypt(aead_req);
116
117	return rc;
118}
119
120static void tls_init_aead_request(struct aead_request *aead_req,
121				  struct crypto_aead *aead)
122{
123	aead_request_set_tfm(aead_req, aead);
124	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
125}
126
127static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
128						   gfp_t flags)
129{
130	unsigned int req_size = sizeof(struct aead_request) +
131		crypto_aead_reqsize(aead);
132	struct aead_request *aead_req;
133
134	aead_req = kzalloc(req_size, flags);
135	if (aead_req)
136		tls_init_aead_request(aead_req, aead);
137	return aead_req;
138}
139
140static int tls_enc_records(struct aead_request *aead_req,
141			   struct crypto_aead *aead, struct scatterlist *sg_in,
142			   struct scatterlist *sg_out, char *aad, char *iv,
143			   u64 rcd_sn, int len, struct tls_prot_info *prot)
144{
145	struct scatter_walk out, in;
146	int rc;
147
148	scatterwalk_start(&in, sg_in);
149	scatterwalk_start(&out, sg_out);
150
151	do {
152		rc = tls_enc_record(aead_req, aead, aad, iv,
153				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
154		rcd_sn++;
155
156	} while (rc == 0 && len);
157
158	scatterwalk_done(&in, 0, 0);
159	scatterwalk_done(&out, 1, 0);
160
161	return rc;
162}
163
164/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
165 * might have been changed by NAT.
166 */
167static void update_chksum(struct sk_buff *skb, int headln)
168{
169	struct tcphdr *th = tcp_hdr(skb);
170	int datalen = skb->len - headln;
171	const struct ipv6hdr *ipv6h;
172	const struct iphdr *iph;
173
174	/* We only changed the payload so if we are using partial we don't
175	 * need to update anything.
176	 */
177	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
178		return;
179
180	skb->ip_summed = CHECKSUM_PARTIAL;
181	skb->csum_start = skb_transport_header(skb) - skb->head;
182	skb->csum_offset = offsetof(struct tcphdr, check);
183
184	if (skb->sk->sk_family == AF_INET6) {
185		ipv6h = ipv6_hdr(skb);
186		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
187					     datalen, IPPROTO_TCP, 0);
188	} else {
189		iph = ip_hdr(skb);
190		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
191					       IPPROTO_TCP, 0);
192	}
193}
194
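/* Transplant the original skb's headers and socket ownership onto the
 * freshly encrypted skb, recompute the TCP checksum, and account any
 * truesize difference against sk_wmem_alloc (skipped when the skb went
 * through skb_orphan_partial(), i.e. destructor == sock_efree).
 */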
195static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
196{
197	struct sock *sk = skb->sk;
198	int delta;
199
200	skb_copy_header(nskb, skb);
201
202	skb_put(nskb, skb->len);
203	memcpy(nskb->data, skb->data, headln);
204
205	nskb->destructor = skb->destructor;
206	nskb->sk = sk;
207	skb->destructor = NULL;
208	skb->sk = NULL;
209
210	update_chksum(nskb, headln);
211
212	/* sock_efree means skb must have gone through skb_orphan_partial() */
213	if (nskb->destructor == sock_efree)
214		return;
215
216	delta = nskb->truesize - skb->truesize;
217	if (likely(delta < 0))
218		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
219	else if (delta)
220		refcount_add(delta, &sk->sk_wmem_alloc);
221}
222
223/* This function may be called after the user socket is already
224 * closed so make sure we don't use anything freed during
225 * tls_sk_proto_close here
226 */
227
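/* Build the encryption input scatterlist: frags of the offload record(s)
 * covering the bytes between the record start and this skb's TCP sequence
 * number (returned via *sync_size, with a page reference taken per frag
 * and the count in *resync_sgs), followed by the skb's own TCP payload.
 * A negative *sync_size means the data was already acked and the packet
 * can safely be dropped.
 */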
228static int fill_sg_in(struct scatterlist *sg_in,
229		      struct sk_buff *skb,
230		      struct tls_offload_context_tx *ctx,
231		      u64 *rcd_sn,
232		      s32 *sync_size,
233		      int *resync_sgs)
234{
235	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
236	int payload_len = skb->len - tcp_payload_offset;
237	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
238	struct tls_record_info *record;
239	unsigned long flags;
240	int remaining;
241	int i;
242
243	spin_lock_irqsave(&ctx->lock, flags);
244	record = tls_get_record(ctx, tcp_seq, rcd_sn);
245	if (!record) {
246		spin_unlock_irqrestore(&ctx->lock, flags);
247		return -EINVAL;
248	}
249
250	*sync_size = tcp_seq - tls_record_start_seq(record);
251	if (*sync_size < 0) {
252		int is_start_marker = tls_record_is_start_marker(record);
253
254		spin_unlock_irqrestore(&ctx->lock, flags);
255		/* This should only occur if the relevant record was
256		 * already acked. In that case it should be ok
257		 * to drop the packet and avoid retransmission.
258		 *
259		 * There is a corner case where the packet contains
260		 * both an acked and a non-acked record.
261		 * We currently don't handle that case and rely
262		 * on TCP to retransmit a packet that doesn't contain
263		 * already acked payload.
264		 */
265		if (!is_start_marker)
266			*sync_size = 0;
267		return -EINVAL;
268	}
269
270	remaining = *sync_size;
271	for (i = 0; remaining > 0; i++) {
272		skb_frag_t *frag = &record->frags[i];
273
274		__skb_frag_ref(frag);
275		sg_set_page(sg_in + i, skb_frag_page(frag),
276			    skb_frag_size(frag), skb_frag_off(frag));
277
278		remaining -= skb_frag_size(frag);
279
280		if (remaining < 0)
281			sg_in[i].length += remaining;
282	}
283	*resync_sgs = i;
284
285	spin_unlock_irqrestore(&ctx->lock, flags);
286	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
287		return -EINVAL;
288
289	return 0;
290}
291
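/* Output scatterlist layout: sg_out[0] is scratch space that absorbs the
 * re-encrypted bytes preceding this skb (sync_size), sg_out[1] is the
 * payload area of the new skb, and sg_out[2] catches the authentication
 * tag of the final, possibly partial, record so it never lands in the
 * transmitted data.
 */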
292static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
293			struct tls_context *tls_ctx,
294			struct sk_buff *nskb,
295			int tcp_payload_offset,
296			int payload_len,
297			int sync_size,
298			void *dummy_buf)
299{
300	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
301	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
302	/* Add room for authentication tag produced by crypto */
303	dummy_buf += sync_size;
304	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
305}
306
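/* Allocate the replacement skb plus the scratch buffers (IV, AAD, dummy
 * output area), encrypt sync_size + payload_len bytes from sg_in into
 * sg_out, then finish the skb via complete_skb().  Returns NULL on any
 * failure, in which case the caller drops the original skb.
 */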
307static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
308				   struct scatterlist sg_out[3],
309				   struct scatterlist *sg_in,
310				   struct sk_buff *skb,
311				   s32 sync_size, u64 rcd_sn)
312{
313	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
314	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
315	int payload_len = skb->len - tcp_payload_offset;
316	void *buf, *iv, *aad, *dummy_buf;
317	struct aead_request *aead_req;
318	struct sk_buff *nskb = NULL;
319	int buf_len;
320
321	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
322	if (!aead_req)
323		return NULL;
324
325	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
326		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
327		  TLS_AAD_SPACE_SIZE +
328		  sync_size +
329		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
330	buf = kmalloc(buf_len, GFP_ATOMIC);
331	if (!buf)
332		goto free_req;
333
334	iv = buf;
335	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
336	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
337	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
338	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
339	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
340
341	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
342	if (!nskb)
343		goto free_buf;
344
345	skb_reserve(nskb, skb_headroom(skb));
346
347	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
348		    payload_len, sync_size, dummy_buf);
349
350	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
351			    rcd_sn, sync_size + payload_len,
352			    &tls_ctx->prot_info) < 0)
353		goto free_nskb;
354
355	complete_skb(nskb, skb, tcp_payload_offset);
356
357	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
358	 * nskb->prev will point to the skb itself
359	 */
360	nskb->prev = nskb;
361
362free_buf:
363	kfree(buf);
364free_req:
365	kfree(aead_req);
366	return nskb;
367free_nskb:
368	kfree_skb(nskb);
369	nskb = NULL;
370	goto free_buf;
371}
372
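/* Per-skb software fallback: skbs with no TCP payload pass through
 * untouched; otherwise the record state is looked up, the data is
 * re-encrypted into a new skb and the original is consumed.  Plaintext
 * that predates enabling the TLS socket option is bypassed via the
 * sync_size < 0 check below; anything else that cannot be encrypted is
 * dropped.
 */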
373static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
374{
375	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
376	struct tls_context *tls_ctx = tls_get_ctx(sk);
377	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
378	int payload_len = skb->len - tcp_payload_offset;
379	struct scatterlist *sg_in, sg_out[3];
380	struct sk_buff *nskb = NULL;
381	int sg_in_max_elements;
382	int resync_sgs = 0;
383	s32 sync_size = 0;
384	u64 rcd_sn;
385
386	/* worst case is:
387	 * MAX_SKB_FRAGS in tls_record_info
388	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
389	 */
390	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
391
392	if (!payload_len)
393		return skb;
394
395	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
396	if (!sg_in)
397		goto free_orig;
398
399	sg_init_table(sg_in, sg_in_max_elements);
400	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
401
402	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
403		/* bypass packets before kernel TLS socket option was set */
404		if (sync_size < 0 && payload_len <= -sync_size)
405			nskb = skb_get(skb);
406		goto put_sg;
407	}
408
409	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
410
411put_sg:
412	while (resync_sgs)
413		put_page(sg_page(&sg_in[--resync_sgs]));
414	kfree(sg_in);
415free_orig:
416	if (nskb)
417		consume_skb(skb);
418	else
419		kfree_skb(skb);
420	return nskb;
421}
422
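/* Transmit-path hook for sockets with TLS TX offload: skbs leaving
 * through the netdev that owns the offload state (or its bonding master)
 * are passed through for hardware encryption; anything else takes the
 * software fallback above.
 */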
423struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
424				      struct net_device *dev,
425				      struct sk_buff *skb)
426{
427	if (dev == tls_get_ctx(sk)->netdev || netif_is_bond_master(dev))
428		return skb;
429
430	return tls_sw_fallback(sk, skb);
431}
432EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
433
434struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
435					 struct net_device *dev,
436					 struct sk_buff *skb)
437{
438	return tls_sw_fallback(sk, skb);
439}
440
441struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
442{
443	return tls_sw_fallback(skb->sk, skb);
444}
445EXPORT_SYMBOL_GPL(tls_encrypt_skb);
446
447int tls_sw_fallback_init(struct sock *sk,
448			 struct tls_offload_context_tx *offload_ctx,
449			 struct tls_crypto_info *crypto_info)
450{
451	const u8 *key;
452	int rc;
453
454	offload_ctx->aead_send =
455	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
456	if (IS_ERR(offload_ctx->aead_send)) {
457		rc = PTR_ERR(offload_ctx->aead_send);
458		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
459		offload_ctx->aead_send = NULL;
460		goto err_out;
461	}
462
463	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
464
465	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
466				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
467	if (rc)
468		goto free_aead;
469
470	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
471				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
472	if (rc)
473		goto free_aead;
474
475	return 0;
476free_aead:
477	crypto_free_aead(offload_ctx->aead_send);
478err_out:
479	return rc;
480}
net/tls/tls_device_fallback.c (v6.8)
  1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
  2 *
  3 * This software is available to you under a choice of one of two
  4 * licenses.  You may choose to be licensed under the terms of the GNU
  5 * General Public License (GPL) Version 2, available from the file
  6 * COPYING in the main directory of this source tree, or the
  7 * OpenIB.org BSD license below:
  8 *
  9 *     Redistribution and use in source and binary forms, with or
 10 *     without modification, are permitted provided that the following
 11 *     conditions are met:
 12 *
 13 *      - Redistributions of source code must retain the above
 14 *        copyright notice, this list of conditions and the following
 15 *        disclaimer.
 16 *
 17 *      - Redistributions in binary form must reproduce the above
 18 *        copyright notice, this list of conditions and the following
 19 *        disclaimer in the documentation and/or other materials
 20 *        provided with the distribution.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 29 * SOFTWARE.
 30 */
 31
 32#include <net/tls.h>
 33#include <crypto/aead.h>
 34#include <crypto/scatterwalk.h>
 35#include <net/ip6_checksum.h>
 36
 37#include "tls.h"
 38
 39static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
 40{
 41	struct scatterlist *src = walk->sg;
 42	int diff = walk->offset - src->offset;
 43
 44	sg_set_page(sg, sg_page(src),
 45		    src->length - diff, walk->offset);
 46
 47	scatterwalk_crypto_chain(sg, sg_next(src), 2);
 48}
 49
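/* v6.8: the fallback is no longer hard-wired to AES-GCM-128.  IV, salt,
 * key and tag sizes and the AEAD algorithm name are taken from the
 * tls_cipher_desc returned by get_cipher_desc(), and only ciphers marked
 * offloadable are accepted by tls_sw_fallback_init().
 */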
 50static int tls_enc_record(struct aead_request *aead_req,
 51			  struct crypto_aead *aead, char *aad,
 52			  char *iv, __be64 rcd_sn,
 53			  struct scatter_walk *in,
 54			  struct scatter_walk *out, int *in_len,
 55			  struct tls_prot_info *prot)
 56{
 57	unsigned char buf[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
 58	const struct tls_cipher_desc *cipher_desc;
 59	struct scatterlist sg_in[3];
 60	struct scatterlist sg_out[3];
 61	unsigned int buf_size;
 62	u16 len;
 63	int rc;
 64
 65	cipher_desc = get_cipher_desc(prot->cipher_type);
 66	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
 67
 68	buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
 69	len = min_t(int, *in_len, buf_size);
 70
 71	scatterwalk_copychunks(buf, in, len, 0);
 72	scatterwalk_copychunks(buf, out, len, 1);
 73
 74	*in_len -= len;
 75	if (!*in_len)
 76		return 0;
 77
 78	scatterwalk_pagedone(in, 0, 1);
 79	scatterwalk_pagedone(out, 1, 1);
 80
 81	len = buf[4] | (buf[3] << 8);
 82	len -= cipher_desc->iv;
 83
 84	tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);
 85
 86	memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);
 87
 88	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
 89	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
 90	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
 91	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
 92	chain_to_walk(sg_in + 1, in);
 93	chain_to_walk(sg_out + 1, out);
 94
 95	*in_len -= len;
 96	if (*in_len < 0) {
 97		*in_len += cipher_desc->tag;
 98		/* the input buffer doesn't contain the entire record.
 99		 * trim len accordingly. The resulting authentication tag
100		 * will contain garbage, but we don't care, so we won't
101		 * include any of it in the output skb
102		 * Note that we assume the output buffer length
103		 * is larger than input buffer length + tag size
104		 */
105		if (*in_len < 0)
106			len += *in_len;
107
108		*in_len = 0;
109	}
110
111	if (*in_len) {
112		scatterwalk_copychunks(NULL, in, len, 2);
113		scatterwalk_pagedone(in, 0, 1);
114		scatterwalk_copychunks(NULL, out, len, 2);
115		scatterwalk_pagedone(out, 1, 1);
116	}
117
118	len -= cipher_desc->tag;
119	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
120
121	rc = crypto_aead_encrypt(aead_req);
122
123	return rc;
124}
125
126static void tls_init_aead_request(struct aead_request *aead_req,
127				  struct crypto_aead *aead)
128{
129	aead_request_set_tfm(aead_req, aead);
130	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
131}
132
133static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
134						   gfp_t flags)
135{
136	unsigned int req_size = sizeof(struct aead_request) +
137		crypto_aead_reqsize(aead);
138	struct aead_request *aead_req;
139
140	aead_req = kzalloc(req_size, flags);
141	if (aead_req)
142		tls_init_aead_request(aead_req, aead);
143	return aead_req;
144}
145
146static int tls_enc_records(struct aead_request *aead_req,
147			   struct crypto_aead *aead, struct scatterlist *sg_in,
148			   struct scatterlist *sg_out, char *aad, char *iv,
149			   u64 rcd_sn, int len, struct tls_prot_info *prot)
150{
151	struct scatter_walk out, in;
152	int rc;
153
154	scatterwalk_start(&in, sg_in);
155	scatterwalk_start(&out, sg_out);
156
157	do {
158		rc = tls_enc_record(aead_req, aead, aad, iv,
159				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
160		rcd_sn++;
161
162	} while (rc == 0 && len);
163
164	scatterwalk_done(&in, 0, 0);
165	scatterwalk_done(&out, 1, 0);
166
167	return rc;
168}
169
170/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
171 * might have been changed by NAT.
172 */
173static void update_chksum(struct sk_buff *skb, int headln)
174{
175	struct tcphdr *th = tcp_hdr(skb);
176	int datalen = skb->len - headln;
177	const struct ipv6hdr *ipv6h;
178	const struct iphdr *iph;
179
180	/* We only changed the payload so if we are using partial we don't
181	 * need to update anything.
182	 */
183	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
184		return;
185
186	skb->ip_summed = CHECKSUM_PARTIAL;
187	skb->csum_start = skb_transport_header(skb) - skb->head;
188	skb->csum_offset = offsetof(struct tcphdr, check);
189
190	if (skb->sk->sk_family == AF_INET6) {
191		ipv6h = ipv6_hdr(skb);
192		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
193					     datalen, IPPROTO_TCP, 0);
194	} else {
195		iph = ip_hdr(skb);
196		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
197					       IPPROTO_TCP, 0);
198	}
199}
200
201static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
202{
203	struct sock *sk = skb->sk;
204	int delta;
205
206	skb_copy_header(nskb, skb);
207
208	skb_put(nskb, skb->len);
209	memcpy(nskb->data, skb->data, headln);
210
211	nskb->destructor = skb->destructor;
212	nskb->sk = sk;
213	skb->destructor = NULL;
214	skb->sk = NULL;
215
216	update_chksum(nskb, headln);
217
218	/* sock_efree means skb must have gone through skb_orphan_partial() */
219	if (nskb->destructor == sock_efree)
220		return;
221
222	delta = nskb->truesize - skb->truesize;
223	if (likely(delta < 0))
224		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
225	else if (delta)
226		refcount_add(delta, &sk->sk_wmem_alloc);
227}
228
229/* This function may be called after the user socket is already
230 * closed so make sure we don't use anything freed during
231 * tls_sk_proto_close here
232 */
233
234static int fill_sg_in(struct scatterlist *sg_in,
235		      struct sk_buff *skb,
236		      struct tls_offload_context_tx *ctx,
237		      u64 *rcd_sn,
238		      s32 *sync_size,
239		      int *resync_sgs)
240{
241	int tcp_payload_offset = skb_tcp_all_headers(skb);
242	int payload_len = skb->len - tcp_payload_offset;
243	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
244	struct tls_record_info *record;
245	unsigned long flags;
246	int remaining;
247	int i;
248
249	spin_lock_irqsave(&ctx->lock, flags);
250	record = tls_get_record(ctx, tcp_seq, rcd_sn);
251	if (!record) {
252		spin_unlock_irqrestore(&ctx->lock, flags);
253		return -EINVAL;
254	}
255
256	*sync_size = tcp_seq - tls_record_start_seq(record);
257	if (*sync_size < 0) {
258		int is_start_marker = tls_record_is_start_marker(record);
259
260		spin_unlock_irqrestore(&ctx->lock, flags);
261		/* This should only occur if the relevant record was
262		 * already acked. In that case it should be ok
263		 * to drop the packet and avoid retransmission.
264		 *
265		 * There is a corner case where the packet contains
266		 * both an acked and a non-acked record.
267		 * We currently don't handle that case and rely
268		 * on TCP to retransmit a packet that doesn't contain
269		 * already acked payload.
270		 */
271		if (!is_start_marker)
272			*sync_size = 0;
273		return -EINVAL;
274	}
275
276	remaining = *sync_size;
277	for (i = 0; remaining > 0; i++) {
278		skb_frag_t *frag = &record->frags[i];
279
280		__skb_frag_ref(frag);
281		sg_set_page(sg_in + i, skb_frag_page(frag),
282			    skb_frag_size(frag), skb_frag_off(frag));
283
284		remaining -= skb_frag_size(frag);
285
286		if (remaining < 0)
287			sg_in[i].length += remaining;
288	}
289	*resync_sgs = i;
290
291	spin_unlock_irqrestore(&ctx->lock, flags);
292	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
293		return -EINVAL;
294
295	return 0;
296}
297
298static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
299			struct tls_context *tls_ctx,
300			struct sk_buff *nskb,
301			int tcp_payload_offset,
302			int payload_len,
303			int sync_size,
304			void *dummy_buf)
305{
306	const struct tls_cipher_desc *cipher_desc =
307		get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
308
309	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
310	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
311	/* Add room for authentication tag produced by crypto */
312	dummy_buf += sync_size;
313	sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
314}
315
316static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
317				   struct scatterlist sg_out[3],
318				   struct scatterlist *sg_in,
319				   struct sk_buff *skb,
320				   s32 sync_size, u64 rcd_sn)
321{
322	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
323	int tcp_payload_offset = skb_tcp_all_headers(skb);
324	int payload_len = skb->len - tcp_payload_offset;
325	const struct tls_cipher_desc *cipher_desc;
326	void *buf, *iv, *aad, *dummy_buf, *salt;
327	struct aead_request *aead_req;
328	struct sk_buff *nskb = NULL;
329	int buf_len;
330
331	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
332	if (!aead_req)
333		return NULL;
334
335	cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
336	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
337
338	buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
339		  sync_size + cipher_desc->tag;
340	buf = kmalloc(buf_len, GFP_ATOMIC);
341	if (!buf)
342		goto free_req;
343
344	iv = buf;
345	salt = crypto_info_salt(&tls_ctx->crypto_send.info, cipher_desc);
346	memcpy(iv, salt, cipher_desc->salt);
347	aad = buf + cipher_desc->salt + cipher_desc->iv;
348	dummy_buf = aad + TLS_AAD_SPACE_SIZE;
349
350	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
351	if (!nskb)
352		goto free_buf;
353
354	skb_reserve(nskb, skb_headroom(skb));
355
356	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
357		    payload_len, sync_size, dummy_buf);
358
359	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
360			    rcd_sn, sync_size + payload_len,
361			    &tls_ctx->prot_info) < 0)
362		goto free_nskb;
363
364	complete_skb(nskb, skb, tcp_payload_offset);
365
366	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
367	 * nskb->prev will point to the skb itself
368	 */
369	nskb->prev = nskb;
370
371free_buf:
372	kfree(buf);
373free_req:
374	kfree(aead_req);
375	return nskb;
376free_nskb:
377	kfree_skb(nskb);
378	nskb = NULL;
379	goto free_buf;
380}
381
382static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
383{
384	int tcp_payload_offset = skb_tcp_all_headers(skb);
385	struct tls_context *tls_ctx = tls_get_ctx(sk);
386	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
387	int payload_len = skb->len - tcp_payload_offset;
388	struct scatterlist *sg_in, sg_out[3];
389	struct sk_buff *nskb = NULL;
390	int sg_in_max_elements;
391	int resync_sgs = 0;
392	s32 sync_size = 0;
393	u64 rcd_sn;
394
395	/* worst case is:
396	 * MAX_SKB_FRAGS in tls_record_info
397	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
398	 */
399	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;
400
401	if (!payload_len)
402		return skb;
403
404	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
405	if (!sg_in)
406		goto free_orig;
407
408	sg_init_table(sg_in, sg_in_max_elements);
409	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
410
411	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
412		/* bypass packets before kernel TLS socket option was set */
413		if (sync_size < 0 && payload_len <= -sync_size)
414			nskb = skb_get(skb);
415		goto put_sg;
416	}
417
418	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);
419
420put_sg:
421	while (resync_sgs)
422		put_page(sg_page(&sg_in[--resync_sgs]));
423	kfree(sg_in);
424free_orig:
425	if (nskb)
426		consume_skb(skb);
427	else
428		kfree_skb(skb);
429	return nskb;
430}
431
432struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
433				      struct net_device *dev,
434				      struct sk_buff *skb)
435{
436	if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
437	    netif_is_bond_master(dev))
438		return skb;
439
440	return tls_sw_fallback(sk, skb);
441}
442EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
443
444struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
445					 struct net_device *dev,
446					 struct sk_buff *skb)
447{
448	return tls_sw_fallback(sk, skb);
449}
450
451struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
452{
453	return tls_sw_fallback(skb->sk, skb);
454}
455EXPORT_SYMBOL_GPL(tls_encrypt_skb);
456
457int tls_sw_fallback_init(struct sock *sk,
458			 struct tls_offload_context_tx *offload_ctx,
459			 struct tls_crypto_info *crypto_info)
460{
461	const struct tls_cipher_desc *cipher_desc;
462	int rc;
463
464	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
465	if (!cipher_desc || !cipher_desc->offloadable)
466		return -EINVAL;
467
468	offload_ctx->aead_send =
469	    crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
470	if (IS_ERR(offload_ctx->aead_send)) {
471		rc = PTR_ERR(offload_ctx->aead_send);
472		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
473		offload_ctx->aead_send = NULL;
474		goto err_out;
475	}
476
477	rc = crypto_aead_setkey(offload_ctx->aead_send,
478				crypto_info_key(crypto_info, cipher_desc),
479				cipher_desc->key);
480	if (rc)
481		goto free_aead;
482
483	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
484	if (rc)
485		goto free_aead;
486
487	return 0;
488free_aead:
489	crypto_free_aead(offload_ctx->aead_send);
490err_out:
491	return rc;
492}
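For context, below is a minimal sketch of how the hook exported above is typically consumed on the transmit side. It is not part of tls_device_fallback.c: the wrapper function and its name are illustrative, and the only detail assumed from the wider kernel is the sk_validate_xmit_skb callback on struct sock (available under CONFIG_SOCK_VALIDATE_XMIT), which the TLS device-offload setup code is expected to point at tls_validate_xmit_skb for offloaded TX sockets.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative stand-in for the core transmit path: if the socket carries
 * a validate-xmit callback, let it veto or rewrite the skb before it
 * reaches the driver.  With TLS TX offload the callback is
 * tls_validate_xmit_skb(), which returns the skb untouched when it is
 * leaving through the device that owns the offload state, a
 * software-encrypted replacement otherwise, or NULL if it had to be
 * dropped.
 */
static struct sk_buff *tls_fallback_demo_validate(struct net_device *dev,
						  struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && sk->sk_validate_xmit_skb)
		skb = sk->sk_validate_xmit_skb(sk, dev, skb);

	return skb;
}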