/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff_ref.h>

#include "tls.h"

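/* Build an sg entry covering the unconsumed tail of the walk's current
 * scatterlist element and chain it to the rest of the walk's list, so
 * the remainder of the data can be handed to the AEAD as one list.
 */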
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

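/* Re-encrypt one TLS record from the @in walk into the @out walk: the
 * 5-byte record header and the explicit IV are copied through as-is,
 * the AAD and nonce are rebuilt from the header and @rcd_sn, and the
 * payload is run through the AEAD. *in_len is reduced by the amount of
 * input consumed.
 */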
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len,
			  struct tls_prot_info *prot)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
	const struct tls_cipher_desc *cipher_desc;
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	unsigned int buf_size;
	u16 len;
	int rc;

	cipher_desc = get_cipher_desc(prot->cipher_type);
	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);

	buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
	len = min_t(int, *in_len, buf_size);

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	/* The record length is carried big-endian in the last two bytes
	 * of the 5-byte TLS record header.
	 */
	len = buf[4] | (buf[3] << 8);
	/* The wire length includes the explicit IV, which was already
	 * copied through above.
	 */
	len -= cipher_desc->iv;

	tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);

	memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += cipher_desc->tag;
		/* the input buffer doesn't contain the entire record.
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size
		 */
		if (*in_len < 0)
			len += *in_len;

		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= cipher_desc->tag;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

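/* Helpers for setting up a software AEAD request: bind the transform,
 * fix the AAD length, and size the allocation for the transform's
 * private request context.
 */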
static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}

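/* Walk @sg_in and @sg_out, re-encrypting one record at a time and
 * incrementing the record sequence number, until @len bytes have been
 * consumed or the AEAD reports an error.
 */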
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len, struct tls_prot_info *prot)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}

static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	/* sock_efree means the skb must have gone through skb_orphan_partial() */
	if (nskb->destructor == sock_efree)
		return;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed, so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */

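/* Map the data to encrypt into @sg_in: the already-transmitted part of
 * the TLS record that precedes this skb's payload (the AEAD has to
 * process the record from its start) is taken from the offload
 * context's record frags, followed by the skb payload itself.
 */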
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}

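/* Lay out the encryption destination in @sg_out: a scratch buffer for
 * the re-encrypted prefix of the record (which is discarded), the
 * payload area of @nskb, and room for the authentication tag.
 */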
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	const struct tls_cipher_desc *cipher_desc =
		get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);

	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
}

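/* Allocate a fresh skb and software-encrypt the TLS payload into it.
 * On success the new skb is returned and the caller releases the
 * original; on failure NULL is returned and the caller drops the
 * original.
 */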
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	int payload_len = skb->len - tcp_payload_offset;
	const struct tls_cipher_desc *cipher_desc;
	void *buf, *iv, *aad, *dummy_buf, *salt;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);

	buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
		  sync_size + cipher_desc->tag;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	salt = crypto_info_salt(&tls_ctx->crypto_send.info, cipher_desc);
	memcpy(iv, salt, cipher_desc->salt);
	aad = buf + cipher_desc->salt + cipher_desc->iv;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len,
			    &tls_ctx->prot_info) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}

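/* Software fallback for the TLS transmit offload path: find the record
 * covering this skb's TCP sequence range, re-encrypt it in software and
 * return the encrypted copy in place of the original skb.
 */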
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	if (nskb)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return nskb;
}

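/* Transmit-time hook for TLS device offload: if the skb is leaving
 * through the netdev that holds the offload state, or through a bond
 * master, hand it back unmodified for the device to encrypt; otherwise
 * encrypt it in software.
 */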
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
	    netif_is_bond_master(dev))
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
					 struct net_device *dev,
					 struct sk_buff *skb)
{
	return tls_sw_fallback(sk, skb);
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
	return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

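/* Set up the software AEAD transform used by the fallback path:
 * allocate a synchronous AEAD for the negotiated cipher and program
 * its key and authentication tag size.
 */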
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const struct tls_cipher_desc *cipher_desc;
	int rc;

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || !cipher_desc->offloadable)
		return -EINVAL;

	offload_ctx->aead_send =
	    crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	rc = crypto_aead_setkey(offload_ctx->aead_send,
				crypto_info_key(crypto_info, cipher_desc),
				cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}