v6.2
   1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
   2 *
   3 * This software is available to you under a choice of one of two
   4 * licenses.  You may choose to be licensed under the terms of the GNU
   5 * General Public License (GPL) Version 2, available from the file
   6 * COPYING in the main directory of this source tree, or the
   7 * OpenIB.org BSD license below:
   8 *
   9 *     Redistribution and use in source and binary forms, with or
  10 *     without modification, are permitted provided that the following
  11 *     conditions are met:
  12 *
  13 *      - Redistributions of source code must retain the above
  14 *        copyright notice, this list of conditions and the following
  15 *        disclaimer.
  16 *
  17 *      - Redistributions in binary form must reproduce the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer in the documentation and/or other materials
  20 *        provided with the distribution.
  21 *
  22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  29 * SOFTWARE.
  30 */
  31
  32#include <crypto/aead.h>
  33#include <linux/highmem.h>
  34#include <linux/module.h>
  35#include <linux/netdevice.h>
  36#include <net/dst.h>
  37#include <net/inet_connection_sock.h>
  38#include <net/tcp.h>
  39#include <net/tls.h>
  40
  41#include "tls.h"
  42#include "trace.h"
  43
  44/* device_offload_lock is used to synchronize tls_dev_add
  45 * against NETDEV_DOWN notifications.
  46 */
  47static DECLARE_RWSEM(device_offload_lock);
  48
  49static struct workqueue_struct *destruct_wq __read_mostly;
  50
  51static LIST_HEAD(tls_device_list);
  52static LIST_HEAD(tls_device_down_list);
  53static DEFINE_SPINLOCK(tls_device_lock);
  54
  55static void tls_device_free_ctx(struct tls_context *ctx)
  56{
  57	if (ctx->tx_conf == TLS_HW) {
  58		kfree(tls_offload_ctx_tx(ctx));
  59		kfree(ctx->tx.rec_seq);
  60		kfree(ctx->tx.iv);
  61	}
  62
  63	if (ctx->rx_conf == TLS_HW)
  64		kfree(tls_offload_ctx_rx(ctx));
  65
  66	tls_ctx_free(NULL, ctx);
  67}
  68
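/* Deferred TX teardown worker. Queued on destruct_wq by
 * tls_device_queue_ctx_destruction() once the ctx refcount drops to zero,
 * so that tls_dev_del() runs in process context rather than under
 * tls_device_lock.
 */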
  69static void tls_device_tx_del_task(struct work_struct *work)
  70{
  71	struct tls_offload_context_tx *offload_ctx =
  72		container_of(work, struct tls_offload_context_tx, destruct_work);
  73	struct tls_context *ctx = offload_ctx->ctx;
  74	struct net_device *netdev;
  75
  76	/* Safe, because this is the destroy flow, refcount is 0, so
  77	 * tls_device_down can't store this field in parallel.
  78	 */
  79	netdev = rcu_dereference_protected(ctx->netdev,
  80					   !refcount_read(&ctx->refcount));
  81
  82	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
  83	dev_put(netdev);
  84	ctx->netdev = NULL;
  85	tls_device_free_ctx(ctx);
  86}
  87
  88static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
  89{
  90	struct net_device *netdev;
  91	unsigned long flags;
  92	bool async_cleanup;
  93
  94	spin_lock_irqsave(&tls_device_lock, flags);
  95	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
  96		spin_unlock_irqrestore(&tls_device_lock, flags);
  97		return;
  98	}
  99
 100	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
 101
 102	/* Safe, because this is the destroy flow, refcount is 0, so
 103	 * tls_device_down can't store this field in parallel.
 104	 */
 105	netdev = rcu_dereference_protected(ctx->netdev,
 106					   !refcount_read(&ctx->refcount));
 107
 108	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
 109	if (async_cleanup) {
 110		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
 111
 112		/* queue_work inside the spinlock
 113		 * to make sure tls_device_down waits for that work.
 114		 */
 115		queue_work(destruct_wq, &offload_ctx->destruct_work);
 116	}
 117	spin_unlock_irqrestore(&tls_device_lock, flags);
 118
 119	if (!async_cleanup)
 120		tls_device_free_ctx(ctx);
 121}
 122
 123/* We assume that the socket is already connected */
 124static struct net_device *get_netdev_for_sock(struct sock *sk)
 125{
 126	struct dst_entry *dst = sk_dst_get(sk);
 127	struct net_device *netdev = NULL;
 128
 129	if (likely(dst)) {
 130		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
 131		dev_hold(netdev);
 132	}
 133
 134	dst_release(dst);
 135
 136	return netdev;
 137}
 138
 139static void destroy_record(struct tls_record_info *record)
 140{
 141	int i;
 142
 143	for (i = 0; i < record->num_frags; i++)
 144		__skb_frag_unref(&record->frags[i], false);
 145	kfree(record);
 146}
 147
 148static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
 149{
 150	struct tls_record_info *info, *temp;
 151
 152	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
 153		list_del(&info->list);
 154		destroy_record(info);
 155	}
 156
 157	offload_ctx->retransmit_hint = NULL;
 158}
 159
 160static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
 161{
 162	struct tls_context *tls_ctx = tls_get_ctx(sk);
 163	struct tls_record_info *info, *temp;
 164	struct tls_offload_context_tx *ctx;
 165	u64 deleted_records = 0;
 166	unsigned long flags;
 167
 168	if (!tls_ctx)
 169		return;
 170
 171	ctx = tls_offload_ctx_tx(tls_ctx);
 172
 173	spin_lock_irqsave(&ctx->lock, flags);
 174	info = ctx->retransmit_hint;
 175	if (info && !before(acked_seq, info->end_seq))
 176		ctx->retransmit_hint = NULL;
 177
 178	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
 179		if (before(acked_seq, info->end_seq))
 180			break;
 181		list_del(&info->list);
 182
 183		destroy_record(info);
 184		deleted_records++;
 185	}
 186
 187	ctx->unacked_record_sn += deleted_records;
 188	spin_unlock_irqrestore(&ctx->lock, flags);
 189}
 190
 191/* At this point, there should be no references on this
 192 * socket and no in-flight SKBs associated with this
 193 * socket, so it is safe to free all the resources.
 194 */
 195void tls_device_sk_destruct(struct sock *sk)
 196{
 197	struct tls_context *tls_ctx = tls_get_ctx(sk);
 198	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 199
 200	tls_ctx->sk_destruct(sk);
 201
 202	if (tls_ctx->tx_conf == TLS_HW) {
 203		if (ctx->open_record)
 204			destroy_record(ctx->open_record);
 205		delete_all_records(ctx);
 206		crypto_free_aead(ctx->aead_send);
 207		clean_acked_data_disable(inet_csk(sk));
 208	}
 209
 210	tls_device_queue_ctx_destruction(tls_ctx);
 211}
 212EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
 213
 214void tls_device_free_resources_tx(struct sock *sk)
 215{
 216	struct tls_context *tls_ctx = tls_get_ctx(sk);
 217
 218	tls_free_partial_record(sk, tls_ctx);
 219}
 220
 221void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
 222{
 223	struct tls_context *tls_ctx = tls_get_ctx(sk);
 224
 225	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
 226	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
 227}
 228EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
 229
 230static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 231				 u32 seq)
 232{
 233	struct net_device *netdev;
 234	struct sk_buff *skb;
 235	int err = 0;
 236	u8 *rcd_sn;
 237
 238	skb = tcp_write_queue_tail(sk);
 239	if (skb)
 240		TCP_SKB_CB(skb)->eor = 1;
 241
 242	rcd_sn = tls_ctx->tx.rec_seq;
 243
 244	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
 245	down_read(&device_offload_lock);
 246	netdev = rcu_dereference_protected(tls_ctx->netdev,
 247					   lockdep_is_held(&device_offload_lock));
 248	if (netdev)
 249		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
 250							 rcd_sn,
 251							 TLS_OFFLOAD_CTX_DIR_TX);
 252	up_read(&device_offload_lock);
 253	if (err)
 254		return;
 255
 256	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 257}
 258
 259static void tls_append_frag(struct tls_record_info *record,
 260			    struct page_frag *pfrag,
 261			    int size)
 262{
 263	skb_frag_t *frag;
 264
 265	frag = &record->frags[record->num_frags - 1];
 266	if (skb_frag_page(frag) == pfrag->page &&
 267	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
 268		skb_frag_size_add(frag, size);
 269	} else {
 270		++frag;
 271		__skb_frag_set_page(frag, pfrag->page);
 272		skb_frag_off_set(frag, pfrag->offset);
 273		skb_frag_size_set(frag, size);
 274		++record->num_frags;
 275		get_page(pfrag->page);
 276	}
 277
 278	pfrag->offset += size;
 279	record->len += size;
 280}
 281
 282static int tls_push_record(struct sock *sk,
 283			   struct tls_context *ctx,
 284			   struct tls_offload_context_tx *offload_ctx,
 285			   struct tls_record_info *record,
 286			   int flags)
 287{
 288	struct tls_prot_info *prot = &ctx->prot_info;
 289	struct tcp_sock *tp = tcp_sk(sk);
 290	skb_frag_t *frag;
 291	int i;
 292
 293	record->end_seq = tp->write_seq + record->len;
 294	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
 295	offload_ctx->open_record = NULL;
 296
 297	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
 298		tls_device_resync_tx(sk, ctx, tp->write_seq);
 299
 300	tls_advance_record_sn(sk, prot, &ctx->tx);
 301
 302	for (i = 0; i < record->num_frags; i++) {
 303		frag = &record->frags[i];
 304		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
 305		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
 306			    skb_frag_size(frag), skb_frag_off(frag));
 307		sk_mem_charge(sk, skb_frag_size(frag));
 308		get_page(skb_frag_page(frag));
 309	}
 310	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
 311
 312	/* all ready, send */
 313	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
 314}
 315
 316static int tls_device_record_close(struct sock *sk,
 317				   struct tls_context *ctx,
 318				   struct tls_record_info *record,
 319				   struct page_frag *pfrag,
 320				   unsigned char record_type)
 321{
 322	struct tls_prot_info *prot = &ctx->prot_info;
 323	int ret;
 324
 325	/* append tag
 326	 * device will fill in the tag, we just need to append a placeholder
 327	 * use socket memory to improve coalescing (re-using a single buffer
 328	 * increases frag count)
 329	 * if we can't allocate memory now, steal some back from data
 330	 */
 331	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
 332					sk->sk_allocation))) {
 333		ret = 0;
 334		tls_append_frag(record, pfrag, prot->tag_size);
 335	} else {
 336		ret = prot->tag_size;
 337		if (record->len <= prot->overhead_size)
 338			return -ENOMEM;
 339	}
 340
 341	/* fill prepend */
 342	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
 343			 record->len - prot->overhead_size,
 344			 record_type);
 345	return ret;
 346}
 347
 348static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
 349				 struct page_frag *pfrag,
 350				 size_t prepend_size)
 351{
 352	struct tls_record_info *record;
 353	skb_frag_t *frag;
 354
 355	record = kmalloc(sizeof(*record), GFP_KERNEL);
 356	if (!record)
 357		return -ENOMEM;
 358
 359	frag = &record->frags[0];
 360	__skb_frag_set_page(frag, pfrag->page);
 361	skb_frag_off_set(frag, pfrag->offset);
 362	skb_frag_size_set(frag, prepend_size);
 363
 364	get_page(pfrag->page);
 365	pfrag->offset += prepend_size;
 366
 367	record->num_frags = 1;
 368	record->len = prepend_size;
 369	offload_ctx->open_record = record;
 370	return 0;
 371}
 372
 373static int tls_do_allocation(struct sock *sk,
 374			     struct tls_offload_context_tx *offload_ctx,
 375			     struct page_frag *pfrag,
 376			     size_t prepend_size)
 377{
 378	int ret;
 379
 380	if (!offload_ctx->open_record) {
 381		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
 382						   sk->sk_allocation))) {
 383			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
 384			sk_stream_moderate_sndbuf(sk);
 385			return -ENOMEM;
 386		}
 387
 388		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
 389		if (ret)
 390			return ret;
 391
 392		if (pfrag->size > pfrag->offset)
 393			return 0;
 394	}
 395
 396	if (!sk_page_frag_refill(sk, pfrag))
 397		return -ENOMEM;
 398
 399	return 0;
 400}
 401
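/* Copy payload from the sender's iov_iter into record memory: a short head
 * copy up to the next cache line boundary, a non-temporal (nocache) copy
 * for the bulk (the CPU typically does not read this data again, the device
 * encrypts it via DMA), then a normal copy for the tail.
 */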
 402static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
 403{
 404	size_t pre_copy, nocache;
 405
 406	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
 407	if (pre_copy) {
 408		pre_copy = min(pre_copy, bytes);
 409		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
 410			return -EFAULT;
 411		bytes -= pre_copy;
 412		addr += pre_copy;
 413	}
 414
 415	nocache = round_down(bytes, SMP_CACHE_BYTES);
 416	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
 417		return -EFAULT;
 418	bytes -= nocache;
 419	addr += nocache;
 420
 421	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
 422		return -EFAULT;
 423
 424	return 0;
 425}
 426
 427union tls_iter_offset {
 428	struct iov_iter *msg_iter;
 429	int offset;
 430};
 431
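/* Core device-offload TX path: carve the payload into TLS records of at
 * most TLS_MAX_PAYLOAD_SIZE bytes, reserve the TLS header at the start of
 * each record and fill it when the record is closed, append a placeholder
 * auth tag (the device computes the real tag), and hand the record's frags
 * to tls_push_sg(). iter_offset carries either an iov_iter (sendmsg path)
 * or a page offset (zerocopy sendpage path).
 */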
 432static int tls_push_data(struct sock *sk,
 433			 union tls_iter_offset iter_offset,
 434			 size_t size, int flags,
 435			 unsigned char record_type,
 436			 struct page *zc_page)
 437{
 438	struct tls_context *tls_ctx = tls_get_ctx(sk);
 439	struct tls_prot_info *prot = &tls_ctx->prot_info;
 440	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 441	struct tls_record_info *record;
 442	int tls_push_record_flags;
 443	struct page_frag *pfrag;
 444	size_t orig_size = size;
 445	u32 max_open_record_len;
 446	bool more = false;
 447	bool done = false;
 448	int copy, rc = 0;
 449	long timeo;
 450
 451	if (flags &
 452	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
 453		return -EOPNOTSUPP;
 454
 455	if (unlikely(sk->sk_err))
 456		return -sk->sk_err;
 457
 458	flags |= MSG_SENDPAGE_DECRYPTED;
 459	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
 460
 461	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 462	if (tls_is_partially_sent_record(tls_ctx)) {
 463		rc = tls_push_partial_record(sk, tls_ctx, flags);
 464		if (rc < 0)
 465			return rc;
 466	}
 467
 468	pfrag = sk_page_frag(sk);
 469
 470	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
 471	 * we need to leave room for an authentication tag.
 472	 */
 473	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
 474			      prot->prepend_size;
 475	do {
 476		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
 477		if (unlikely(rc)) {
 478			rc = sk_stream_wait_memory(sk, &timeo);
 479			if (!rc)
 480				continue;
 481
 482			record = ctx->open_record;
 483			if (!record)
 484				break;
 485handle_error:
 486			if (record_type != TLS_RECORD_TYPE_DATA) {
 487				/* avoid sending partial
 488				 * record with type !=
 489				 * application_data
 490				 */
 491				size = orig_size;
 492				destroy_record(record);
 493				ctx->open_record = NULL;
 494			} else if (record->len > prot->prepend_size) {
 495				goto last_record;
 496			}
 497
 498			break;
 499		}
 500
 501		record = ctx->open_record;
 502
 503		copy = min_t(size_t, size, max_open_record_len - record->len);
 504		if (copy && zc_page) {
 505			struct page_frag zc_pfrag;
 506
 507			zc_pfrag.page = zc_page;
 508			zc_pfrag.offset = iter_offset.offset;
 509			zc_pfrag.size = copy;
 510			tls_append_frag(record, &zc_pfrag, copy);
 511		} else if (copy) {
 512			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
 513
 514			rc = tls_device_copy_data(page_address(pfrag->page) +
 515						  pfrag->offset, copy,
 516						  iter_offset.msg_iter);
 517			if (rc)
 518				goto handle_error;
 519			tls_append_frag(record, pfrag, copy);
 520		}
 521
 522		size -= copy;
 523		if (!size) {
 524last_record:
 525			tls_push_record_flags = flags;
 526			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
 527				more = true;
 528				break;
 529			}
 530
 531			done = true;
 532		}
 533
 534		if (done || record->len >= max_open_record_len ||
 535		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
 536			rc = tls_device_record_close(sk, tls_ctx, record,
 537						     pfrag, record_type);
 538			if (rc) {
 539				if (rc > 0) {
 540					size += rc;
 541				} else {
 542					size = orig_size;
 543					destroy_record(record);
 544					ctx->open_record = NULL;
 545					break;
 546				}
 547			}
 548
 549			rc = tls_push_record(sk,
 550					     tls_ctx,
 551					     ctx,
 552					     record,
 553					     tls_push_record_flags);
 554			if (rc < 0)
 555				break;
 556		}
 557	} while (!done);
 558
 559	tls_ctx->pending_open_record_frags = more;
 560
 561	if (orig_size - size > 0)
 562		rc = orig_size - size;
 563
 564	return rc;
 565}
 566
 567int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 568{
 569	unsigned char record_type = TLS_RECORD_TYPE_DATA;
 570	struct tls_context *tls_ctx = tls_get_ctx(sk);
 571	union tls_iter_offset iter;
 572	int rc;
 573
 574	mutex_lock(&tls_ctx->tx_lock);
 575	lock_sock(sk);
 576
 577	if (unlikely(msg->msg_controllen)) {
 578		rc = tls_process_cmsg(sk, msg, &record_type);
 579		if (rc)
 580			goto out;
 581	}
 582
 583	iter.msg_iter = &msg->msg_iter;
 584	rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);
 585
 586out:
 587	release_sock(sk);
 588	mutex_unlock(&tls_ctx->tx_lock);
 589	return rc;
 590}
 591
 592int tls_device_sendpage(struct sock *sk, struct page *page,
 593			int offset, size_t size, int flags)
 594{
 595	struct tls_context *tls_ctx = tls_get_ctx(sk);
 596	union tls_iter_offset iter_offset;
 597	struct iov_iter msg_iter;
 598	char *kaddr;
 599	struct kvec iov;
 600	int rc;
 601
 602	if (flags & MSG_SENDPAGE_NOTLAST)
 603		flags |= MSG_MORE;
 604
 605	mutex_lock(&tls_ctx->tx_lock);
 606	lock_sock(sk);
 607
 608	if (flags & MSG_OOB) {
 609		rc = -EOPNOTSUPP;
 610		goto out;
 611	}
 612
 613	if (tls_ctx->zerocopy_sendfile) {
 614		iter_offset.offset = offset;
 615		rc = tls_push_data(sk, iter_offset, size,
 616				   flags, TLS_RECORD_TYPE_DATA, page);
 617		goto out;
 618	}
 619
 620	kaddr = kmap(page);
 621	iov.iov_base = kaddr + offset;
 622	iov.iov_len = size;
 623	iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size);
 624	iter_offset.msg_iter = &msg_iter;
 625	rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
 626			   NULL);
 627	kunmap(page);
 628
 629out:
 630	release_sock(sk);
 631	mutex_unlock(&tls_ctx->tx_lock);
 632	return rc;
 633}
 634
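/* Find the TLS record covering TCP sequence number @seq and return its
 * record sequence number through @p_record_sn. Drivers call this on the
 * transmit path (e.g. for retransmitted segments) to rebuild crypto state
 * for an out-of-order skb; retransmit_hint caches the previous hit so the
 * records list rarely has to be walked from the start.
 */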
 635struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 636				       u32 seq, u64 *p_record_sn)
 637{
 638	u64 record_sn = context->hint_record_sn;
 639	struct tls_record_info *info, *last;
 640
 641	info = context->retransmit_hint;
 642	if (!info ||
 643	    before(seq, info->end_seq - info->len)) {
 644		/* if retransmit_hint is irrelevant start
 645		 * from the beginning of the list
 646		 */
 647		info = list_first_entry_or_null(&context->records_list,
 648						struct tls_record_info, list);
 649		if (!info)
 650			return NULL;
 651		/* send the start_marker record if seq number is before the
 652		 * tls offload start marker sequence number. This record is
 653		 * required to handle TCP packets which are before TLS offload
 654		 * started.
  655		 *  And if it's not a start marker, check whether this seq number
 656		 * belongs to the list.
 657		 */
 658		if (likely(!tls_record_is_start_marker(info))) {
 659			/* we have the first record, get the last record to see
 660			 * if this seq number belongs to the list.
 661			 */
 662			last = list_last_entry(&context->records_list,
 663					       struct tls_record_info, list);
 664
 665			if (!between(seq, tls_record_start_seq(info),
 666				     last->end_seq))
 667				return NULL;
 668		}
 669		record_sn = context->unacked_record_sn;
 670	}
 671
 672	/* We just need the _rcu for the READ_ONCE() */
 673	rcu_read_lock();
 674	list_for_each_entry_from_rcu(info, &context->records_list, list) {
 675		if (before(seq, info->end_seq)) {
 676			if (!context->retransmit_hint ||
 677			    after(info->end_seq,
 678				  context->retransmit_hint->end_seq)) {
 679				context->hint_record_sn = record_sn;
 680				context->retransmit_hint = info;
 681			}
 682			*p_record_sn = record_sn;
 683			goto exit_rcu_unlock;
 684		}
 685		record_sn++;
 686	}
 687	info = NULL;
 688
 689exit_rcu_unlock:
 690	rcu_read_unlock();
 691	return info;
 692}
 693EXPORT_SYMBOL(tls_get_record);
 694
 695static int tls_device_push_pending_record(struct sock *sk, int flags)
 696{
 697	union tls_iter_offset iter;
 698	struct iov_iter msg_iter;
 699
 700	iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0);
 701	iter.msg_iter = &msg_iter;
 702	return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
 703}
 704
 705void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 706{
 707	if (tls_is_partially_sent_record(ctx)) {
 708		gfp_t sk_allocation = sk->sk_allocation;
 709
 710		WARN_ON_ONCE(sk->sk_write_pending);
 711
 712		sk->sk_allocation = GFP_ATOMIC;
 713		tls_push_partial_record(sk, ctx,
 714					MSG_DONTWAIT | MSG_NOSIGNAL |
 715					MSG_SENDPAGE_DECRYPTED);
 716		sk->sk_allocation = sk_allocation;
 717	}
 718}
 719
 720static void tls_device_resync_rx(struct tls_context *tls_ctx,
 721				 struct sock *sk, u32 seq, u8 *rcd_sn)
 722{
 723	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 724	struct net_device *netdev;
 725
 726	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
 727	rcu_read_lock();
 728	netdev = rcu_dereference(tls_ctx->netdev);
 729	if (netdev)
 730		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
 731						   TLS_OFFLOAD_CTX_DIR_RX);
 732	rcu_read_unlock();
 733	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 734}
 735
 736static bool
 737tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
 738			   s64 resync_req, u32 *seq, u16 *rcd_delta)
 739{
 740	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
 741	u32 req_seq = resync_req >> 32;
 742	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
 743	u16 i;
 744
 745	*rcd_delta = 0;
 746
 747	if (is_async) {
 748		/* shouldn't get to wraparound:
 749		 * too long in async stage, something bad happened
 750		 */
 751		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
 752			return false;
 753
 754		/* asynchronous stage: log all headers seq such that
 755		 * req_seq <= seq <= end_seq, and wait for real resync request
 756		 */
 757		if (before(*seq, req_seq))
 758			return false;
 759		if (!after(*seq, req_end) &&
 760		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
 761			resync_async->log[resync_async->loglen++] = *seq;
 762
 763		resync_async->rcd_delta++;
 764
 765		return false;
 766	}
 767
 768	/* synchronous stage: check against the logged entries and
 769	 * proceed to check the next entries if no match was found
 770	 */
 771	for (i = 0; i < resync_async->loglen; i++)
 772		if (req_seq == resync_async->log[i] &&
 773		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
 774			*rcd_delta = resync_async->rcd_delta - i;
 775			*seq = req_seq;
 776			resync_async->loglen = 0;
 777			resync_async->rcd_delta = 0;
 778			return true;
 779		}
 780
 781	resync_async->loglen = 0;
 782	resync_async->rcd_delta = 0;
 783
 784	if (req_seq == *seq &&
 785	    atomic64_try_cmpxchg(&resync_async->req,
 786				 &resync_req, 0))
 787		return true;
 788
 789	return false;
 790}
 791
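/* Called by the software RX path for each newly parsed record header.
 * Depending on the resync type selected by the driver, decide whether a
 * device RX resync is due and, if so, pass the TCP sequence and record
 * sequence number down via tls_device_resync_rx().
 */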
 792void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 793{
 794	struct tls_context *tls_ctx = tls_get_ctx(sk);
 795	struct tls_offload_context_rx *rx_ctx;
 796	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
 797	u32 sock_data, is_req_pending;
 798	struct tls_prot_info *prot;
 799	s64 resync_req;
 800	u16 rcd_delta;
 801	u32 req_seq;
 802
 803	if (tls_ctx->rx_conf != TLS_HW)
 804		return;
 805	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
 806		return;
 807
 808	prot = &tls_ctx->prot_info;
 809	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 810	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
 811
 812	switch (rx_ctx->resync_type) {
 813	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
 814		resync_req = atomic64_read(&rx_ctx->resync_req);
 815		req_seq = resync_req >> 32;
 816		seq += TLS_HEADER_SIZE - 1;
 817		is_req_pending = resync_req;
 818
 819		if (likely(!is_req_pending) || req_seq != seq ||
 820		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 821			return;
 822		break;
 823	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
 824		if (likely(!rx_ctx->resync_nh_do_now))
 825			return;
 826
 827		/* head of next rec is already in, note that the sock_inq will
 828		 * include the currently parsed message when called from parser
 829		 */
 830		sock_data = tcp_inq(sk);
 831		if (sock_data > rcd_len) {
 832			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
 833							    rcd_len);
 834			return;
 835		}
 836
 837		rx_ctx->resync_nh_do_now = 0;
 838		seq += rcd_len;
 839		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 840		break;
 841	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
 842		resync_req = atomic64_read(&rx_ctx->resync_async->req);
 843		is_req_pending = resync_req;
 844		if (likely(!is_req_pending))
 845			return;
 846
 847		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
 848						resync_req, &seq, &rcd_delta))
 849			return;
 850		tls_bigint_subtract(rcd_sn, rcd_delta);
 851		break;
 852	}
 853
 854	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
 855}
 856
 857static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
 858					   struct tls_offload_context_rx *ctx,
 859					   struct sock *sk, struct sk_buff *skb)
 860{
 861	struct strp_msg *rxm;
 862
 863	/* device will request resyncs by itself based on stream scan */
 864	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
 865		return;
 866	/* already scheduled */
 867	if (ctx->resync_nh_do_now)
 868		return;
 869	/* seen decrypted fragments since last fully-failed record */
 870	if (ctx->resync_nh_reset) {
 871		ctx->resync_nh_reset = 0;
 872		ctx->resync_nh.decrypted_failed = 1;
 873		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
 874		return;
 875	}
 876
 877	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
 878		return;
 879
 880	/* doing resync, bump the next target in case it fails */
 881	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
 882		ctx->resync_nh.decrypted_tgt *= 2;
 883	else
 884		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
 885
 886	rxm = strp_msg(skb);
 887
 888	/* head of next rec is already in, parser will sync for us */
 889	if (tcp_inq(sk) > rxm->full_len) {
 890		trace_tls_device_rx_resync_nh_schedule(sk);
 891		ctx->resync_nh_do_now = 1;
 892	} else {
 893		struct tls_prot_info *prot = &tls_ctx->prot_info;
 894		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
 895
 896		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
 897		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 898
 899		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
 900				     rcd_sn);
 901	}
 902}
 903
 904static int
 905tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 906{
 907	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
 908	const struct tls_cipher_size_desc *cipher_sz;
 909	int err, offset, copy, data_len, pos;
 910	struct sk_buff *skb, *skb_iter;
 911	struct scatterlist sg[1];
 912	struct strp_msg *rxm;
 913	char *orig_buf, *buf;
 914
 915	switch (tls_ctx->crypto_recv.info.cipher_type) {
 916	case TLS_CIPHER_AES_GCM_128:
 917	case TLS_CIPHER_AES_GCM_256:
 918		break;
 919	default:
 920		return -EINVAL;
 921	}
 922	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
 923
 924	rxm = strp_msg(tls_strp_msg(sw_ctx));
 925	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
 926			   sk->sk_allocation);
 927	if (!orig_buf)
 928		return -ENOMEM;
 929	buf = orig_buf;
 930
 931	err = tls_strp_msg_cow(sw_ctx);
 932	if (unlikely(err))
 933		goto free_buf;
 934
 935	skb = tls_strp_msg(sw_ctx);
 936	rxm = strp_msg(skb);
 937	offset = rxm->offset;
 938
 939	sg_init_table(sg, 1);
 940	sg_set_buf(&sg[0], buf,
 941		   rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
 942	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
 943	if (err)
 944		goto free_buf;
 945
 946	/* We are interested only in the decrypted data not the auth */
 947	err = decrypt_skb(sk, sg);
 948	if (err != -EBADMSG)
 949		goto free_buf;
 950	else
 951		err = 0;
 952
 953	data_len = rxm->full_len - cipher_sz->tag;
 954
 955	if (skb_pagelen(skb) > offset) {
 956		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 957
 958		if (skb->decrypted) {
 959			err = skb_store_bits(skb, offset, buf, copy);
 960			if (err)
 961				goto free_buf;
 962		}
 963
 964		offset += copy;
 965		buf += copy;
 966	}
 967
 968	pos = skb_pagelen(skb);
 969	skb_walk_frags(skb, skb_iter) {
 970		int frag_pos;
 971
 972		/* Practically all frags must belong to msg if reencrypt
 973		 * is needed with current strparser and coalescing logic,
 974		 * but strparser may "get optimized", so let's be safe.
 975		 */
 976		if (pos + skb_iter->len <= offset)
 977			goto done_with_frag;
 978		if (pos >= data_len + rxm->offset)
 979			break;
 980
 981		frag_pos = offset - pos;
 982		copy = min_t(int, skb_iter->len - frag_pos,
 983			     data_len + rxm->offset - offset);
 984
 985		if (skb_iter->decrypted) {
 986			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
 987			if (err)
 988				goto free_buf;
 989		}
 990
 991		offset += copy;
 992		buf += copy;
 993done_with_frag:
 994		pos += skb_iter->len;
 995	}
 996
 997free_buf:
 998	kfree(orig_buf);
 999	return err;
1000}
1001
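/* Classify the just-parsed record: fully decrypted by the device, fully
 * encrypted, or mixed. Fully decrypted records are accepted as-is, fully
 * encrypted ones fall back to software decryption (possibly scheduling a
 * resync), and partially decrypted records are re-encrypted in place by
 * tls_device_reencrypt() so the software path sees uniform ciphertext.
 */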
1002int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
1003{
1004	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
1005	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
1006	struct sk_buff *skb = tls_strp_msg(sw_ctx);
1007	struct strp_msg *rxm = strp_msg(skb);
1008	int is_decrypted = skb->decrypted;
1009	int is_encrypted = !is_decrypted;
1010	struct sk_buff *skb_iter;
1011	int left;
1012
1013	left = rxm->full_len - skb->len;
1014	/* Check if all the data is decrypted already */
1015	skb_iter = skb_shinfo(skb)->frag_list;
1016	while (skb_iter && left > 0) {
1017		is_decrypted &= skb_iter->decrypted;
1018		is_encrypted &= !skb_iter->decrypted;
1019
1020		left -= skb_iter->len;
1021		skb_iter = skb_iter->next;
1022	}
1023
1024	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
1025				   tls_ctx->rx.rec_seq, rxm->full_len,
1026				   is_encrypted, is_decrypted);
1027
1028	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
1029		if (likely(is_encrypted || is_decrypted))
1030			return is_decrypted;
1031
1032		/* After tls_device_down disables the offload, the next SKB will
1033		 * likely have initial fragments decrypted, and final ones not
1034		 * decrypted. We need to reencrypt that single SKB.
1035		 */
1036		return tls_device_reencrypt(sk, tls_ctx);
1037	}
1038
1039	/* Return immediately if the record is either entirely plaintext or
1040	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
1041	 * record.
1042	 */
1043	if (is_decrypted) {
1044		ctx->resync_nh_reset = 1;
1045		return is_decrypted;
1046	}
1047	if (is_encrypted) {
1048		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
1049		return 0;
1050	}
1051
1052	ctx->resync_nh_reset = 1;
1053	return tls_device_reencrypt(sk, tls_ctx);
1054}
1055
1056static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
1057			      struct net_device *netdev)
1058{
1059	if (sk->sk_destruct != tls_device_sk_destruct) {
1060		refcount_set(&ctx->refcount, 1);
1061		dev_hold(netdev);
1062		RCU_INIT_POINTER(ctx->netdev, netdev);
1063		spin_lock_irq(&tls_device_lock);
1064		list_add_tail(&ctx->list, &tls_device_list);
1065		spin_unlock_irq(&tls_device_lock);
1066
1067		ctx->sk_destruct = sk->sk_destruct;
1068		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
1069	}
1070}
1071
1072int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
1073{
1074	struct tls_context *tls_ctx = tls_get_ctx(sk);
1075	struct tls_prot_info *prot = &tls_ctx->prot_info;
1076	const struct tls_cipher_size_desc *cipher_sz;
1077	struct tls_record_info *start_marker_record;
1078	struct tls_offload_context_tx *offload_ctx;
1079	struct tls_crypto_info *crypto_info;
1080	struct net_device *netdev;
1081	char *iv, *rec_seq;
1082	struct sk_buff *skb;
1083	__be64 rcd_sn;
1084	int rc;
1085
1086	if (!ctx)
1087		return -EINVAL;
1088
1089	if (ctx->priv_ctx_tx)
1090		return -EEXIST;
1091
1092	netdev = get_netdev_for_sock(sk);
1093	if (!netdev) {
1094		pr_err_ratelimited("%s: netdev not found\n", __func__);
1095		return -EINVAL;
1096	}
1097
1098	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
1099		rc = -EOPNOTSUPP;
1100		goto release_netdev;
1101	}
1102
1103	crypto_info = &ctx->crypto_send.info;
1104	if (crypto_info->version != TLS_1_2_VERSION) {
1105		rc = -EOPNOTSUPP;
1106		goto release_netdev;
1107	}
1108
1109	switch (crypto_info->cipher_type) {
1110	case TLS_CIPHER_AES_GCM_128:
1111		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
1112		rec_seq =
1113		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
1114		break;
1115	case TLS_CIPHER_AES_GCM_256:
1116		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
1117		rec_seq =
1118		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
1119		break;
1120	default:
1121		rc = -EINVAL;
1122		goto release_netdev;
1123	}
1124	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
1125
1126	/* Sanity-check the rec_seq_size for stack allocations */
1127	if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
1128		rc = -EINVAL;
1129		goto release_netdev;
1130	}
1131
1132	prot->version = crypto_info->version;
1133	prot->cipher_type = crypto_info->cipher_type;
1134	prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
1135	prot->tag_size = cipher_sz->tag;
1136	prot->overhead_size = prot->prepend_size + prot->tag_size;
1137	prot->iv_size = cipher_sz->iv;
1138	prot->salt_size = cipher_sz->salt;
1139	ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
1140	if (!ctx->tx.iv) {
1141		rc = -ENOMEM;
1142		goto release_netdev;
1143	}
1144
1145	memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
1146
1147	prot->rec_seq_size = cipher_sz->rec_seq;
1148	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
1149	if (!ctx->tx.rec_seq) {
1150		rc = -ENOMEM;
1151		goto free_iv;
1152	}
1153
1154	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
1155	if (!start_marker_record) {
1156		rc = -ENOMEM;
1157		goto free_rec_seq;
1158	}
1159
1160	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
1161	if (!offload_ctx) {
1162		rc = -ENOMEM;
1163		goto free_marker_record;
1164	}
1165
1166	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
1167	if (rc)
1168		goto free_offload_ctx;
1169
1170	/* start at rec_seq - 1 to account for the start marker record */
1171	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
1172	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
1173
1174	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
1175	start_marker_record->len = 0;
1176	start_marker_record->num_frags = 0;
1177
1178	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
1179	offload_ctx->ctx = ctx;
1180
1181	INIT_LIST_HEAD(&offload_ctx->records_list);
1182	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
1183	spin_lock_init(&offload_ctx->lock);
1184	sg_init_table(offload_ctx->sg_tx_data,
1185		      ARRAY_SIZE(offload_ctx->sg_tx_data));
1186
1187	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
1188	ctx->push_pending_record = tls_device_push_pending_record;
1189
1190	/* TLS offload is greatly simplified if we don't send
1191	 * SKBs where only part of the payload needs to be encrypted.
1192	 * So mark the last skb in the write queue as end of record.
1193	 */
1194	skb = tcp_write_queue_tail(sk);
1195	if (skb)
1196		TCP_SKB_CB(skb)->eor = 1;
1197
1198	/* Avoid offloading if the device is down
1199	 * We don't want to offload new flows after
1200	 * the NETDEV_DOWN event
1201	 *
 1202	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1203	 * handler thus protecting from the device going down before
1204	 * ctx was added to tls_device_list.
1205	 */
1206	down_read(&device_offload_lock);
1207	if (!(netdev->flags & IFF_UP)) {
1208		rc = -EINVAL;
1209		goto release_lock;
1210	}
1211
1212	ctx->priv_ctx_tx = offload_ctx;
1213	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
1214					     &ctx->crypto_send.info,
1215					     tcp_sk(sk)->write_seq);
1216	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
1217				     tcp_sk(sk)->write_seq, rec_seq, rc);
1218	if (rc)
1219		goto release_lock;
1220
1221	tls_device_attach(ctx, sk, netdev);
1222	up_read(&device_offload_lock);
1223
1224	/* following this assignment tls_is_sk_tx_device_offloaded
1225	 * will return true and the context might be accessed
1226	 * by the netdev's xmit function.
1227	 */
1228	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
1229	dev_put(netdev);
1230
1231	return 0;
1232
1233release_lock:
1234	up_read(&device_offload_lock);
1235	clean_acked_data_disable(inet_csk(sk));
1236	crypto_free_aead(offload_ctx->aead_send);
1237free_offload_ctx:
1238	kfree(offload_ctx);
1239	ctx->priv_ctx_tx = NULL;
1240free_marker_record:
1241	kfree(start_marker_record);
1242free_rec_seq:
1243	kfree(ctx->tx.rec_seq);
1244free_iv:
1245	kfree(ctx->tx.iv);
1246release_netdev:
1247	dev_put(netdev);
1248	return rc;
1249}
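/* Example (not part of this file): a minimal userspace sketch, assuming a
 * connected TCP socket whose route resolves to a netdev advertising
 * NETIF_F_HW_TLS_TX, showing the setsockopt() path that ultimately reaches
 * tls_set_device_offload(). Key material would normally come from a
 * completed TLS 1.2 handshake; error handling is omitted for brevity.
 *
 *	#include <linux/tls.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	static int enable_ktls_tx(int fd)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci = {
 *			.info.version = TLS_1_2_VERSION,
 *			.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *		};
 *
 *		// fill ci.key / ci.iv / ci.salt / ci.rec_seq from the handshake
 *
 *		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 */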
1250
1251int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
1252{
1253	struct tls12_crypto_info_aes_gcm_128 *info;
1254	struct tls_offload_context_rx *context;
1255	struct net_device *netdev;
1256	int rc = 0;
1257
1258	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
1259		return -EOPNOTSUPP;
1260
1261	netdev = get_netdev_for_sock(sk);
1262	if (!netdev) {
1263		pr_err_ratelimited("%s: netdev not found\n", __func__);
1264		return -EINVAL;
1265	}
1266
1267	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
1268		rc = -EOPNOTSUPP;
1269		goto release_netdev;
1270	}
1271
1272	/* Avoid offloading if the device is down
1273	 * We don't want to offload new flows after
1274	 * the NETDEV_DOWN event
1275	 *
 1276	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1277	 * handler thus protecting from the device going down before
1278	 * ctx was added to tls_device_list.
1279	 */
1280	down_read(&device_offload_lock);
1281	if (!(netdev->flags & IFF_UP)) {
1282		rc = -EINVAL;
1283		goto release_lock;
1284	}
1285
1286	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
1287	if (!context) {
1288		rc = -ENOMEM;
1289		goto release_lock;
1290	}
1291	context->resync_nh_reset = 1;
1292
1293	ctx->priv_ctx_rx = context;
1294	rc = tls_set_sw_offload(sk, ctx, 0);
1295	if (rc)
1296		goto release_ctx;
1297
1298	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
1299					     &ctx->crypto_recv.info,
1300					     tcp_sk(sk)->copied_seq);
1301	info = (void *)&ctx->crypto_recv.info;
1302	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
1303				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
1304	if (rc)
1305		goto free_sw_resources;
1306
1307	tls_device_attach(ctx, sk, netdev);
1308	up_read(&device_offload_lock);
1309
1310	dev_put(netdev);
1311
1312	return 0;
1313
1314free_sw_resources:
1315	up_read(&device_offload_lock);
1316	tls_sw_free_resources_rx(sk);
1317	down_read(&device_offload_lock);
1318release_ctx:
1319	ctx->priv_ctx_rx = NULL;
1320release_lock:
1321	up_read(&device_offload_lock);
1322release_netdev:
1323	dev_put(netdev);
1324	return rc;
1325}
1326
1327void tls_device_offload_cleanup_rx(struct sock *sk)
1328{
1329	struct tls_context *tls_ctx = tls_get_ctx(sk);
1330	struct net_device *netdev;
1331
1332	down_read(&device_offload_lock);
1333	netdev = rcu_dereference_protected(tls_ctx->netdev,
1334					   lockdep_is_held(&device_offload_lock));
1335	if (!netdev)
1336		goto out;
1337
1338	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
1339					TLS_OFFLOAD_CTX_DIR_RX);
1340
1341	if (tls_ctx->tx_conf != TLS_HW) {
1342		dev_put(netdev);
1343		rcu_assign_pointer(tls_ctx->netdev, NULL);
1344	} else {
1345		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
1346	}
1347out:
1348	up_read(&device_offload_lock);
1349	tls_sw_release_resources_rx(sk);
1350}
1351
1352static int tls_device_down(struct net_device *netdev)
1353{
1354	struct tls_context *ctx, *tmp;
1355	unsigned long flags;
1356	LIST_HEAD(list);
1357
1358	/* Request a write lock to block new offload attempts */
1359	down_write(&device_offload_lock);
1360
1361	spin_lock_irqsave(&tls_device_lock, flags);
1362	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
1363		struct net_device *ctx_netdev =
1364			rcu_dereference_protected(ctx->netdev,
1365						  lockdep_is_held(&device_offload_lock));
1366
1367		if (ctx_netdev != netdev ||
1368		    !refcount_inc_not_zero(&ctx->refcount))
1369			continue;
1370
1371		list_move(&ctx->list, &list);
1372	}
1373	spin_unlock_irqrestore(&tls_device_lock, flags);
1374
1375	list_for_each_entry_safe(ctx, tmp, &list, list)	{
1376		/* Stop offloaded TX and switch to the fallback.
1377		 * tls_is_sk_tx_device_offloaded will return false.
1378		 */
1379		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
1380
1381		/* Stop the RX and TX resync.
1382		 * tls_dev_resync must not be called after tls_dev_del.
1383		 */
1384		rcu_assign_pointer(ctx->netdev, NULL);
1385
1386		/* Start skipping the RX resync logic completely. */
1387		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1388
1389		/* Sync with inflight packets. After this point:
1390		 * TX: no non-encrypted packets will be passed to the driver.
1391		 * RX: resync requests from the driver will be ignored.
1392		 */
1393		synchronize_net();
1394
1395		/* Release the offload context on the driver side. */
1396		if (ctx->tx_conf == TLS_HW)
1397			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1398							TLS_OFFLOAD_CTX_DIR_TX);
1399		if (ctx->rx_conf == TLS_HW &&
1400		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
1401			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1402							TLS_OFFLOAD_CTX_DIR_RX);
1403
1404		dev_put(netdev);
1405
1406		/* Move the context to a separate list for two reasons:
1407		 * 1. When the context is deallocated, list_del is called.
1408		 * 2. It's no longer an offloaded context, so we don't want to
1409		 *    run offload-specific code on this context.
1410		 */
1411		spin_lock_irqsave(&tls_device_lock, flags);
1412		list_move_tail(&ctx->list, &tls_device_down_list);
1413		spin_unlock_irqrestore(&tls_device_lock, flags);
1414
 1415		/* Device contexts for RX and TX will be freed on sk_destruct
1416		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
1417		 * Now release the ref taken above.
1418		 */
1419		if (refcount_dec_and_test(&ctx->refcount)) {
1420			/* sk_destruct ran after tls_device_down took a ref, and
1421			 * it returned early. Complete the destruction here.
1422			 */
1423			list_del(&ctx->list);
1424			tls_device_free_ctx(ctx);
1425		}
1426	}
1427
1428	up_write(&device_offload_lock);
1429
1430	flush_workqueue(destruct_wq);
1431
1432	return NOTIFY_DONE;
1433}
1434
1435static int tls_dev_event(struct notifier_block *this, unsigned long event,
1436			 void *ptr)
1437{
1438	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1439
1440	if (!dev->tlsdev_ops &&
1441	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
1442		return NOTIFY_DONE;
1443
1444	switch (event) {
1445	case NETDEV_REGISTER:
1446	case NETDEV_FEAT_CHANGE:
1447		if (netif_is_bond_master(dev))
1448			return NOTIFY_DONE;
1449		if ((dev->features & NETIF_F_HW_TLS_RX) &&
1450		    !dev->tlsdev_ops->tls_dev_resync)
1451			return NOTIFY_BAD;
1452
1453		if  (dev->tlsdev_ops &&
1454		     dev->tlsdev_ops->tls_dev_add &&
1455		     dev->tlsdev_ops->tls_dev_del)
1456			return NOTIFY_DONE;
1457		else
1458			return NOTIFY_BAD;
1459	case NETDEV_DOWN:
1460		return tls_device_down(dev);
1461	}
1462	return NOTIFY_DONE;
1463}
1464
1465static struct notifier_block tls_dev_notifier = {
1466	.notifier_call	= tls_dev_event,
1467};
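/* Example (not part of this file): a minimal sketch of the driver side that
 * the notifier above checks for. The mydrv_* names are hypothetical; struct
 * tlsdev_ops and the NETIF_F_HW_TLS_* feature bits are the real hooks a
 * driver wires up (see include/net/tls.h and netdev_features.h).
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_dev_add,
 *		.tls_dev_del	= mydrv_tls_dev_del,
 *		.tls_dev_resync	= mydrv_tls_dev_resync,
 *	};
 *
 *	// in the driver's probe path:
 *	//	netdev->tlsdev_ops = &mydrv_tlsdev_ops;
 *	//	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */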
1468
1469int __init tls_device_init(void)
1470{
1471	int err;
1472
1473	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
1474	if (!destruct_wq)
1475		return -ENOMEM;
1476
1477	err = register_netdevice_notifier(&tls_dev_notifier);
1478	if (err)
1479		destroy_workqueue(destruct_wq);
1480
1481	return err;
1482}
1483
1484void __exit tls_device_cleanup(void)
1485{
1486	unregister_netdevice_notifier(&tls_dev_notifier);
1487	destroy_workqueue(destruct_wq);
1488	clean_acked_data_flush();
1489}
v6.13.7
   1/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
   2 *
   3 * This software is available to you under a choice of one of two
   4 * licenses.  You may choose to be licensed under the terms of the GNU
   5 * General Public License (GPL) Version 2, available from the file
   6 * COPYING in the main directory of this source tree, or the
   7 * OpenIB.org BSD license below:
   8 *
   9 *     Redistribution and use in source and binary forms, with or
  10 *     without modification, are permitted provided that the following
  11 *     conditions are met:
  12 *
  13 *      - Redistributions of source code must retain the above
  14 *        copyright notice, this list of conditions and the following
  15 *        disclaimer.
  16 *
  17 *      - Redistributions in binary form must reproduce the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer in the documentation and/or other materials
  20 *        provided with the distribution.
  21 *
  22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  29 * SOFTWARE.
  30 */
  31
  32#include <crypto/aead.h>
  33#include <linux/highmem.h>
  34#include <linux/module.h>
  35#include <linux/netdevice.h>
  36#include <net/dst.h>
  37#include <net/inet_connection_sock.h>
  38#include <net/tcp.h>
  39#include <net/tls.h>
  40#include <linux/skbuff_ref.h>
  41
  42#include "tls.h"
  43#include "trace.h"
  44
  45/* device_offload_lock is used to synchronize tls_dev_add
  46 * against NETDEV_DOWN notifications.
  47 */
  48static DECLARE_RWSEM(device_offload_lock);
  49
  50static struct workqueue_struct *destruct_wq __read_mostly;
  51
  52static LIST_HEAD(tls_device_list);
  53static LIST_HEAD(tls_device_down_list);
  54static DEFINE_SPINLOCK(tls_device_lock);
  55
  56static struct page *dummy_page;
  57
  58static void tls_device_free_ctx(struct tls_context *ctx)
  59{
  60	if (ctx->tx_conf == TLS_HW)
  61		kfree(tls_offload_ctx_tx(ctx));
  62
  63	if (ctx->rx_conf == TLS_HW)
  64		kfree(tls_offload_ctx_rx(ctx));
  65
  66	tls_ctx_free(NULL, ctx);
  67}
  68
  69static void tls_device_tx_del_task(struct work_struct *work)
  70{
  71	struct tls_offload_context_tx *offload_ctx =
  72		container_of(work, struct tls_offload_context_tx, destruct_work);
  73	struct tls_context *ctx = offload_ctx->ctx;
  74	struct net_device *netdev;
  75
  76	/* Safe, because this is the destroy flow, refcount is 0, so
  77	 * tls_device_down can't store this field in parallel.
  78	 */
  79	netdev = rcu_dereference_protected(ctx->netdev,
  80					   !refcount_read(&ctx->refcount));
  81
  82	netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX);
  83	dev_put(netdev);
  84	ctx->netdev = NULL;
  85	tls_device_free_ctx(ctx);
  86}
  87
  88static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
  89{
  90	struct net_device *netdev;
  91	unsigned long flags;
  92	bool async_cleanup;
  93
  94	spin_lock_irqsave(&tls_device_lock, flags);
  95	if (unlikely(!refcount_dec_and_test(&ctx->refcount))) {
  96		spin_unlock_irqrestore(&tls_device_lock, flags);
  97		return;
  98	}
  99
 100	list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */
 101
 102	/* Safe, because this is the destroy flow, refcount is 0, so
 103	 * tls_device_down can't store this field in parallel.
 104	 */
 105	netdev = rcu_dereference_protected(ctx->netdev,
 106					   !refcount_read(&ctx->refcount));
 107
 108	async_cleanup = netdev && ctx->tx_conf == TLS_HW;
 109	if (async_cleanup) {
 110		struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx);
 111
 112		/* queue_work inside the spinlock
 113		 * to make sure tls_device_down waits for that work.
 114		 */
 115		queue_work(destruct_wq, &offload_ctx->destruct_work);
 116	}
 117	spin_unlock_irqrestore(&tls_device_lock, flags);
 118
 119	if (!async_cleanup)
 120		tls_device_free_ctx(ctx);
 121}
 122
 123/* We assume that the socket is already connected */
 124static struct net_device *get_netdev_for_sock(struct sock *sk)
 125{
 126	struct dst_entry *dst = sk_dst_get(sk);
 127	struct net_device *netdev = NULL;
 128
 129	if (likely(dst)) {
 130		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
 131		dev_hold(netdev);
 132	}
 133
 134	dst_release(dst);
 135
 136	return netdev;
 137}
 138
 139static void destroy_record(struct tls_record_info *record)
 140{
 141	int i;
 142
 143	for (i = 0; i < record->num_frags; i++)
 144		__skb_frag_unref(&record->frags[i], false);
 145	kfree(record);
 146}
 147
 148static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
 149{
 150	struct tls_record_info *info, *temp;
 151
 152	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
 153		list_del(&info->list);
 154		destroy_record(info);
 155	}
 156
 157	offload_ctx->retransmit_hint = NULL;
 158}
 159
 160static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
 161{
 162	struct tls_context *tls_ctx = tls_get_ctx(sk);
 163	struct tls_record_info *info, *temp;
 164	struct tls_offload_context_tx *ctx;
 165	u64 deleted_records = 0;
 166	unsigned long flags;
 167
 168	if (!tls_ctx)
 169		return;
 170
 171	ctx = tls_offload_ctx_tx(tls_ctx);
 172
 173	spin_lock_irqsave(&ctx->lock, flags);
 174	info = ctx->retransmit_hint;
 175	if (info && !before(acked_seq, info->end_seq))
 176		ctx->retransmit_hint = NULL;
 177
 178	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
 179		if (before(acked_seq, info->end_seq))
 180			break;
 181		list_del(&info->list);
 182
 183		destroy_record(info);
 184		deleted_records++;
 185	}
 186
 187	ctx->unacked_record_sn += deleted_records;
 188	spin_unlock_irqrestore(&ctx->lock, flags);
 189}
 190
 191/* At this point, there should be no references on this
 192 * socket and no in-flight SKBs associated with this
 193 * socket, so it is safe to free all the resources.
 194 */
 195void tls_device_sk_destruct(struct sock *sk)
 196{
 197	struct tls_context *tls_ctx = tls_get_ctx(sk);
 198	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 199
 200	tls_ctx->sk_destruct(sk);
 201
 202	if (tls_ctx->tx_conf == TLS_HW) {
 203		if (ctx->open_record)
 204			destroy_record(ctx->open_record);
 205		delete_all_records(ctx);
 206		crypto_free_aead(ctx->aead_send);
 207		clean_acked_data_disable(inet_csk(sk));
 208	}
 209
 210	tls_device_queue_ctx_destruction(tls_ctx);
 211}
 212EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
 213
 214void tls_device_free_resources_tx(struct sock *sk)
 215{
 216	struct tls_context *tls_ctx = tls_get_ctx(sk);
 217
 218	tls_free_partial_record(sk, tls_ctx);
 219}
 220
 221void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
 222{
 223	struct tls_context *tls_ctx = tls_get_ctx(sk);
 224
 225	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
 226	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
 227}
 228EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);
 229
 230static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 231				 u32 seq)
 232{
 233	struct net_device *netdev;
 234	int err = 0;
 235	u8 *rcd_sn;
 236
 237	tcp_write_collapse_fence(sk);
 238	rcd_sn = tls_ctx->tx.rec_seq;
 239
 240	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
 241	down_read(&device_offload_lock);
 242	netdev = rcu_dereference_protected(tls_ctx->netdev,
 243					   lockdep_is_held(&device_offload_lock));
 244	if (netdev)
 245		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
 246							 rcd_sn,
 247							 TLS_OFFLOAD_CTX_DIR_TX);
 248	up_read(&device_offload_lock);
 249	if (err)
 250		return;
 251
 252	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 253}
 254
 255static void tls_append_frag(struct tls_record_info *record,
 256			    struct page_frag *pfrag,
 257			    int size)
 258{
 259	skb_frag_t *frag;
 260
 261	frag = &record->frags[record->num_frags - 1];
 262	if (skb_frag_page(frag) == pfrag->page &&
 263	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
 264		skb_frag_size_add(frag, size);
 265	} else {
 266		++frag;
 267		skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
 268					size);
 269		++record->num_frags;
 270		get_page(pfrag->page);
 271	}
 272
 273	pfrag->offset += size;
 274	record->len += size;
 275}
 276
 277static int tls_push_record(struct sock *sk,
 278			   struct tls_context *ctx,
 279			   struct tls_offload_context_tx *offload_ctx,
 280			   struct tls_record_info *record,
 281			   int flags)
 282{
 283	struct tls_prot_info *prot = &ctx->prot_info;
 284	struct tcp_sock *tp = tcp_sk(sk);
 285	skb_frag_t *frag;
 286	int i;
 287
 288	record->end_seq = tp->write_seq + record->len;
 289	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
 290	offload_ctx->open_record = NULL;
 291
 292	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
 293		tls_device_resync_tx(sk, ctx, tp->write_seq);
 294
 295	tls_advance_record_sn(sk, prot, &ctx->tx);
 296
 297	for (i = 0; i < record->num_frags; i++) {
 298		frag = &record->frags[i];
 299		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
 300		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
 301			    skb_frag_size(frag), skb_frag_off(frag));
 302		sk_mem_charge(sk, skb_frag_size(frag));
 303		get_page(skb_frag_page(frag));
 304	}
 305	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
 306
 307	/* all ready, send */
 308	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
 309}
 310
 311static void tls_device_record_close(struct sock *sk,
 312				    struct tls_context *ctx,
 313				    struct tls_record_info *record,
 314				    struct page_frag *pfrag,
 315				    unsigned char record_type)
 316{
 317	struct tls_prot_info *prot = &ctx->prot_info;
 318	struct page_frag dummy_tag_frag;
 319
 320	/* append tag
 321	 * device will fill in the tag, we just need to append a placeholder
 322	 * use socket memory to improve coalescing (re-using a single buffer
 323	 * increases frag count)
 324	 * if we can't allocate memory now use the dummy page
 325	 */
 326	if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) &&
 327	    !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) {
 328		dummy_tag_frag.page = dummy_page;
 329		dummy_tag_frag.offset = 0;
 330		pfrag = &dummy_tag_frag;
 331	}
 332	tls_append_frag(record, pfrag, prot->tag_size);
 333
 334	/* fill prepend */
 335	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
 336			 record->len - prot->overhead_size,
 337			 record_type);
 338}
 339
 340static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
 341				 struct page_frag *pfrag,
 342				 size_t prepend_size)
 343{
 344	struct tls_record_info *record;
 345	skb_frag_t *frag;
 346
 347	record = kmalloc(sizeof(*record), GFP_KERNEL);
 348	if (!record)
 349		return -ENOMEM;
 350
 351	frag = &record->frags[0];
 352	skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset,
 353				prepend_size);
 354
 355	get_page(pfrag->page);
 356	pfrag->offset += prepend_size;
 357
 358	record->num_frags = 1;
 359	record->len = prepend_size;
 360	offload_ctx->open_record = record;
 361	return 0;
 362}
 363
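/* Make sure an open record exists and the socket's page_frag has room for
 * more payload.  Returns 0 on success or -ENOMEM when memory could not be
 * obtained.
 */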
 364static int tls_do_allocation(struct sock *sk,
 365			     struct tls_offload_context_tx *offload_ctx,
 366			     struct page_frag *pfrag,
 367			     size_t prepend_size)
 368{
 369	int ret;
 370
 371	if (!offload_ctx->open_record) {
 372		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
 373						   sk->sk_allocation))) {
 374			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
 375			sk_stream_moderate_sndbuf(sk);
 376			return -ENOMEM;
 377		}
 378
 379		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
 380		if (ret)
 381			return ret;
 382
 383		if (pfrag->size > pfrag->offset)
 384			return 0;
 385	}
 386
 387	if (!sk_page_frag_refill(sk, pfrag))
 388		return -ENOMEM;
 389
 390	return 0;
 391}
 392
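/* Copy @bytes from the iterator into @addr.  The cache-line-aligned bulk of
 * the copy uses copy_from_iter_nocache() to avoid polluting the CPU caches;
 * the unaligned head and tail are copied normally.
 */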
 393static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
 394{
 395	size_t pre_copy, nocache;
 396
 397	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
 398	if (pre_copy) {
 399		pre_copy = min(pre_copy, bytes);
 400		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
 401			return -EFAULT;
 402		bytes -= pre_copy;
 403		addr += pre_copy;
 404	}
 405
 406	nocache = round_down(bytes, SMP_CACHE_BYTES);
 407	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
 408		return -EFAULT;
 409	bytes -= nocache;
 410	addr += nocache;
 411
 412	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
 413		return -EFAULT;
 414
 415	return 0;
 416}
 417
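/* Main TX path for device offload: consume up to @size bytes from @iter and
 * pack them into TLS records of at most TLS_MAX_PAYLOAD_SIZE.  Data is copied
 * into socket page_frags, or referenced directly when MSG_SPLICE_PAGES is
 * set.  A record is closed and pushed to TCP when it is full, when the
 * fragment limit is reached, or at the end of the data (unless MSG_MORE is
 * set).  Returns the number of bytes consumed or a negative error.
 */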
 418static int tls_push_data(struct sock *sk,
 419			 struct iov_iter *iter,
 420			 size_t size, int flags,
 421			 unsigned char record_type)
 422{
 423	struct tls_context *tls_ctx = tls_get_ctx(sk);
 424	struct tls_prot_info *prot = &tls_ctx->prot_info;
 425	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
 426	struct tls_record_info *record;
 427	int tls_push_record_flags;
 428	struct page_frag *pfrag;
 429	size_t orig_size = size;
 430	u32 max_open_record_len;
 431	bool more = false;
 432	bool done = false;
 433	int copy, rc = 0;
 434	long timeo;
 435
 436	if (flags &
 437	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 438	      MSG_SPLICE_PAGES | MSG_EOR))
 439		return -EOPNOTSUPP;
 440
 441	if ((flags & (MSG_MORE | MSG_EOR)) == (MSG_MORE | MSG_EOR))
 442		return -EINVAL;
 443
 444	if (unlikely(sk->sk_err))
 445		return -sk->sk_err;
 446
 447	flags |= MSG_SENDPAGE_DECRYPTED;
 448	tls_push_record_flags = flags | MSG_MORE;
 449
 450	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 451	if (tls_is_partially_sent_record(tls_ctx)) {
 452		rc = tls_push_partial_record(sk, tls_ctx, flags);
 453		if (rc < 0)
 454			return rc;
 455	}
 456
 457	pfrag = sk_page_frag(sk);
 458
 459	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
 460	 * we need to leave room for an authentication tag.
 461	 */
 462	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
 463			      prot->prepend_size;
 464	do {
 465		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
 466		if (unlikely(rc)) {
 467			rc = sk_stream_wait_memory(sk, &timeo);
 468			if (!rc)
 469				continue;
 470
 471			record = ctx->open_record;
 472			if (!record)
 473				break;
 474handle_error:
 475			if (record_type != TLS_RECORD_TYPE_DATA) {
 476				/* avoid sending partial
 477				 * record with type !=
 478				 * application_data
 479				 */
 480				size = orig_size;
 481				destroy_record(record);
 482				ctx->open_record = NULL;
 483			} else if (record->len > prot->prepend_size) {
 484				goto last_record;
 485			}
 486
 487			break;
 488		}
 489
 490		record = ctx->open_record;
 491
 492		copy = min_t(size_t, size, max_open_record_len - record->len);
 493		if (copy && (flags & MSG_SPLICE_PAGES)) {
 494			struct page_frag zc_pfrag;
 495			struct page **pages = &zc_pfrag.page;
 496			size_t off;
 497
 498			rc = iov_iter_extract_pages(iter, &pages,
 499						    copy, 1, 0, &off);
 500			if (rc <= 0) {
 501				if (rc == 0)
 502					rc = -EIO;
 503				goto handle_error;
 504			}
 505			copy = rc;
 506
 507			if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) {
 508				iov_iter_revert(iter, copy);
 509				rc = -EIO;
 510				goto handle_error;
 511			}
 512
 513			zc_pfrag.offset = off;
 514			zc_pfrag.size = copy;
 515			tls_append_frag(record, &zc_pfrag, copy);
 516		} else if (copy) {
 517			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
 518
 519			rc = tls_device_copy_data(page_address(pfrag->page) +
 520						  pfrag->offset, copy,
 521						  iter);
 522			if (rc)
 523				goto handle_error;
 524			tls_append_frag(record, pfrag, copy);
 525		}
 526
 527		size -= copy;
 528		if (!size) {
 529last_record:
 530			tls_push_record_flags = flags;
 531			if (flags & MSG_MORE) {
 532				more = true;
 533				break;
 534			}
 535
 536			done = true;
 537		}
 538
 539		if (done || record->len >= max_open_record_len ||
 540		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
 541			tls_device_record_close(sk, tls_ctx, record,
 542						pfrag, record_type);
 543
 544			rc = tls_push_record(sk,
 545					     tls_ctx,
 546					     ctx,
 547					     record,
 548					     tls_push_record_flags);
 549			if (rc < 0)
 550				break;
 551		}
 552	} while (!done);
 553
 554	tls_ctx->pending_open_record_frags = more;
 555
 556	if (orig_size - size > 0)
 557		rc = orig_size - size;
 558
 559	return rc;
 560}
 561
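/* sendmsg() handler for a TX-offloaded TLS socket: process an optional
 * control message that changes the record type, then feed the payload to
 * tls_push_data().  MSG_SPLICE_PAGES is honoured only when zerocopy_sendfile
 * is enabled on the context.
 */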
 562int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 563{
 564	unsigned char record_type = TLS_RECORD_TYPE_DATA;
 565	struct tls_context *tls_ctx = tls_get_ctx(sk);
 566	int rc;
 567
 568	if (!tls_ctx->zerocopy_sendfile)
 569		msg->msg_flags &= ~MSG_SPLICE_PAGES;
 570
 571	mutex_lock(&tls_ctx->tx_lock);
 572	lock_sock(sk);
 573
 574	if (unlikely(msg->msg_controllen)) {
 575		rc = tls_process_cmsg(sk, msg, &record_type);
 576		if (rc)
 577			goto out;
 578	}
 579
 580	rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags,
 581			   record_type);
 582
 583out:
 584	release_sock(sk);
 585	mutex_unlock(&tls_ctx->tx_lock);
 586	return rc;
 587}
 588
 589void tls_device_splice_eof(struct socket *sock)
 590{
 591	struct sock *sk = sock->sk;
 592	struct tls_context *tls_ctx = tls_get_ctx(sk);
 593	struct iov_iter iter = {};
 594
 595	if (!tls_is_partially_sent_record(tls_ctx))
 596		return;
 597
 598	mutex_lock(&tls_ctx->tx_lock);
 599	lock_sock(sk);
 600
 601	if (tls_is_partially_sent_record(tls_ctx)) {
 602		iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
 603		tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
 604	}
 605
 606	release_sock(sk);
 607	mutex_unlock(&tls_ctx->tx_lock);
 608}
 609
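/* Find the TLS record covering TCP sequence number @seq and return its record
 * sequence number through @p_record_sn.  The retransmit_hint caches the last
 * lookup so that retransmissions do not rescan the whole list.
 */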
 610struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
 611				       u32 seq, u64 *p_record_sn)
 612{
 613	u64 record_sn = context->hint_record_sn;
 614	struct tls_record_info *info, *last;
 615
 616	info = context->retransmit_hint;
 617	if (!info ||
 618	    before(seq, info->end_seq - info->len)) {
  619		/* if retransmit_hint is irrelevant, start
 620		 * from the beginning of the list
 621		 */
 622		info = list_first_entry_or_null(&context->records_list,
 623						struct tls_record_info, list);
 624		if (!info)
 625			return NULL;
 626		/* send the start_marker record if seq number is before the
 627		 * tls offload start marker sequence number. This record is
 628		 * required to handle TCP packets which are before TLS offload
 629		 * started.
  630		 * If it's not the start marker, check whether this seq number
  631		 * belongs to the list.
 632		 */
 633		if (likely(!tls_record_is_start_marker(info))) {
 634			/* we have the first record, get the last record to see
 635			 * if this seq number belongs to the list.
 636			 */
 637			last = list_last_entry(&context->records_list,
 638					       struct tls_record_info, list);
 639
 640			if (!between(seq, tls_record_start_seq(info),
 641				     last->end_seq))
 642				return NULL;
 643		}
 644		record_sn = context->unacked_record_sn;
 645	}
 646
 647	/* We just need the _rcu for the READ_ONCE() */
 648	rcu_read_lock();
 649	list_for_each_entry_from_rcu(info, &context->records_list, list) {
 650		if (before(seq, info->end_seq)) {
 651			if (!context->retransmit_hint ||
 652			    after(info->end_seq,
 653				  context->retransmit_hint->end_seq)) {
 654				context->hint_record_sn = record_sn;
 655				context->retransmit_hint = info;
 656			}
 657			*p_record_sn = record_sn;
 658			goto exit_rcu_unlock;
 659		}
 660		record_sn++;
 661	}
 662	info = NULL;
 663
 664exit_rcu_unlock:
 665	rcu_read_unlock();
 666	return info;
 667}
 668EXPORT_SYMBOL(tls_get_record);
 669
 670static int tls_device_push_pending_record(struct sock *sk, int flags)
 671{
 672	struct iov_iter iter;
 673
 674	iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
 675	return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
 676}
 677
 678void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 679{
 680	if (tls_is_partially_sent_record(ctx)) {
 681		gfp_t sk_allocation = sk->sk_allocation;
 682
 683		WARN_ON_ONCE(sk->sk_write_pending);
 684
 685		sk->sk_allocation = GFP_ATOMIC;
 686		tls_push_partial_record(sk, ctx,
 687					MSG_DONTWAIT | MSG_NOSIGNAL |
 688					MSG_SENDPAGE_DECRYPTED);
 689		sk->sk_allocation = sk_allocation;
 690	}
 691}
 692
 693static void tls_device_resync_rx(struct tls_context *tls_ctx,
 694				 struct sock *sk, u32 seq, u8 *rcd_sn)
 695{
 696	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 697	struct net_device *netdev;
 698
 699	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
 700	rcu_read_lock();
 701	netdev = rcu_dereference(tls_ctx->netdev);
 702	if (netdev)
 703		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
 704						   TLS_OFFLOAD_CTX_DIR_RX);
 705	rcu_read_unlock();
 706	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 707}
 708
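/* Handle a driver-requested asynchronous RX resync.  While the request is in
 * the async stage, record-header sequence numbers inside the requested range
 * are logged and the record delta is counted; when the final synchronous
 * request arrives, a matching logged entry (or the current header) resolves
 * the resync point.  Returns true when the resync should be sent now.
 */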
 709static bool
 710tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
 711			   s64 resync_req, u32 *seq, u16 *rcd_delta)
 712{
 713	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
 714	u32 req_seq = resync_req >> 32;
 715	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
 716	u16 i;
 717
 718	*rcd_delta = 0;
 719
 720	if (is_async) {
 721		/* shouldn't get to wraparound:
 722		 * too long in async stage, something bad happened
 723		 */
 724		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
 725			return false;
 726
  727		/* asynchronous stage: log the seq of each record header such that
  728		 * req_seq <= seq <= req_end, and wait for the real resync request
 729		 */
 730		if (before(*seq, req_seq))
 731			return false;
 732		if (!after(*seq, req_end) &&
 733		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
 734			resync_async->log[resync_async->loglen++] = *seq;
 735
 736		resync_async->rcd_delta++;
 737
 738		return false;
 739	}
 740
 741	/* synchronous stage: check against the logged entries and
 742	 * proceed to check the next entries if no match was found
 743	 */
 744	for (i = 0; i < resync_async->loglen; i++)
 745		if (req_seq == resync_async->log[i] &&
 746		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
 747			*rcd_delta = resync_async->rcd_delta - i;
 748			*seq = req_seq;
 749			resync_async->loglen = 0;
 750			resync_async->rcd_delta = 0;
 751			return true;
 752		}
 753
 754	resync_async->loglen = 0;
 755	resync_async->rcd_delta = 0;
 756
 757	if (req_seq == *seq &&
 758	    atomic64_try_cmpxchg(&resync_async->req,
 759				 &resync_req, 0))
 760		return true;
 761
 762	return false;
 763}
 764
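/* Called for each record header seen on an RX-offloaded socket.  Depending on
 * the configured resync type, decide whether to notify the device of the
 * (TCP seq, record seq) resync point via tls_device_resync_rx().
 */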
 765void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 766{
 767	struct tls_context *tls_ctx = tls_get_ctx(sk);
 768	struct tls_offload_context_rx *rx_ctx;
 769	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
 770	u32 sock_data, is_req_pending;
 771	struct tls_prot_info *prot;
 772	s64 resync_req;
 773	u16 rcd_delta;
 774	u32 req_seq;
 775
 776	if (tls_ctx->rx_conf != TLS_HW)
 777		return;
 778	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
 779		return;
 780
 781	prot = &tls_ctx->prot_info;
 782	rx_ctx = tls_offload_ctx_rx(tls_ctx);
 783	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
 784
 785	switch (rx_ctx->resync_type) {
 786	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
 787		resync_req = atomic64_read(&rx_ctx->resync_req);
 788		req_seq = resync_req >> 32;
 789		seq += TLS_HEADER_SIZE - 1;
 790		is_req_pending = resync_req;
 791
 792		if (likely(!is_req_pending) || req_seq != seq ||
 793		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 794			return;
 795		break;
 796	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
 797		if (likely(!rx_ctx->resync_nh_do_now))
 798			return;
 799
 800		/* head of next rec is already in, note that the sock_inq will
 801		 * include the currently parsed message when called from parser
 802		 */
 803		sock_data = tcp_inq(sk);
 804		if (sock_data > rcd_len) {
 805			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
 806							    rcd_len);
 807			return;
 808		}
 809
 810		rx_ctx->resync_nh_do_now = 0;
 811		seq += rcd_len;
 812		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 813		break;
 814	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
 815		resync_req = atomic64_read(&rx_ctx->resync_async->req);
 816		is_req_pending = resync_req;
 817		if (likely(!is_req_pending))
 818			return;
 819
 820		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
 821						resync_req, &seq, &rcd_delta))
 822			return;
 823		tls_bigint_subtract(rcd_sn, rcd_delta);
 824		break;
 825	}
 826
 827	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
 828}
 829
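/* Core-driven ("next hint") resync: after enough consecutive records failed
 * to be decrypted by the device, either schedule a resync on the next record
 * header (if it is already queued in the socket) or resync immediately at
 * copied_seq.  The failure target doubles after every attempt until it
 * reaches TLS_DEVICE_RESYNC_NH_MAX_IVAL, then grows linearly.
 */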
 830static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
 831					   struct tls_offload_context_rx *ctx,
 832					   struct sock *sk, struct sk_buff *skb)
 833{
 834	struct strp_msg *rxm;
 835
 836	/* device will request resyncs by itself based on stream scan */
 837	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
 838		return;
 839	/* already scheduled */
 840	if (ctx->resync_nh_do_now)
 841		return;
 842	/* seen decrypted fragments since last fully-failed record */
 843	if (ctx->resync_nh_reset) {
 844		ctx->resync_nh_reset = 0;
 845		ctx->resync_nh.decrypted_failed = 1;
 846		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
 847		return;
 848	}
 849
 850	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
 851		return;
 852
 853	/* doing resync, bump the next target in case it fails */
 854	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
 855		ctx->resync_nh.decrypted_tgt *= 2;
 856	else
 857		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
 858
 859	rxm = strp_msg(skb);
 860
 861	/* head of next rec is already in, parser will sync for us */
 862	if (tcp_inq(sk) > rxm->full_len) {
 863		trace_tls_device_rx_resync_nh_schedule(sk);
 864		ctx->resync_nh_do_now = 1;
 865	} else {
 866		struct tls_prot_info *prot = &tls_ctx->prot_info;
 867		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
 868
 869		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
 870		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
 871
 872		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
 873				     rcd_sn);
 874	}
 875}
 876
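/* Handle a record that the device only partially decrypted: run decrypt_skb()
 * over the mixed data and write the output back only over the skb portions
 * that are already plaintext, turning them back into ciphertext so that the
 * whole record can go through the regular software decrypt path.  The tag
 * check is expected to fail here, so -EBADMSG is treated as success.
 */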
 877static int
 878tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
 879{
 880	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
 881	const struct tls_cipher_desc *cipher_desc;
 882	int err, offset, copy, data_len, pos;
 883	struct sk_buff *skb, *skb_iter;
 884	struct scatterlist sg[1];
 885	struct strp_msg *rxm;
 886	char *orig_buf, *buf;
 887
 888	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
 889	DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
 890
 891	rxm = strp_msg(tls_strp_msg(sw_ctx));
 892	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
 893			   sk->sk_allocation);
 894	if (!orig_buf)
 895		return -ENOMEM;
 896	buf = orig_buf;
 897
 898	err = tls_strp_msg_cow(sw_ctx);
 899	if (unlikely(err))
 900		goto free_buf;
 901
 902	skb = tls_strp_msg(sw_ctx);
 903	rxm = strp_msg(skb);
 904	offset = rxm->offset;
 905
 906	sg_init_table(sg, 1);
 907	sg_set_buf(&sg[0], buf,
 908		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
 909	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
 910	if (err)
 911		goto free_buf;
 912
  913	/* We are interested only in the decrypted data, not the auth tag */
 914	err = decrypt_skb(sk, sg);
 915	if (err != -EBADMSG)
 916		goto free_buf;
 917	else
 918		err = 0;
 919
 920	data_len = rxm->full_len - cipher_desc->tag;
 921
 922	if (skb_pagelen(skb) > offset) {
 923		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 924
 925		if (skb->decrypted) {
 926			err = skb_store_bits(skb, offset, buf, copy);
 927			if (err)
 928				goto free_buf;
 929		}
 930
 931		offset += copy;
 932		buf += copy;
 933	}
 934
 935	pos = skb_pagelen(skb);
 936	skb_walk_frags(skb, skb_iter) {
 937		int frag_pos;
 938
 939		/* Practically all frags must belong to msg if reencrypt
 940		 * is needed with current strparser and coalescing logic,
 941		 * but strparser may "get optimized", so let's be safe.
 942		 */
 943		if (pos + skb_iter->len <= offset)
 944			goto done_with_frag;
 945		if (pos >= data_len + rxm->offset)
 946			break;
 947
 948		frag_pos = offset - pos;
 949		copy = min_t(int, skb_iter->len - frag_pos,
 950			     data_len + rxm->offset - offset);
 951
 952		if (skb_iter->decrypted) {
 953			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
 954			if (err)
 955				goto free_buf;
 956		}
 957
 958		offset += copy;
 959		buf += copy;
 960done_with_frag:
 961		pos += skb_iter->len;
 962	}
 963
 964free_buf:
 965	kfree(orig_buf);
 966	return err;
 967}
 968
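/* Per-record RX hook reporting whether the device decrypted the current
 * record.  Fully decrypted records are accepted as-is, fully encrypted ones
 * fall back to software decryption (possibly triggering core-driven resync),
 * and mixed records are repaired via tls_device_reencrypt().
 */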
 969int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
 970{
 971	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
 972	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
 973	struct sk_buff *skb = tls_strp_msg(sw_ctx);
 974	struct strp_msg *rxm = strp_msg(skb);
 975	int is_decrypted, is_encrypted;
 976
 977	if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
 978		is_decrypted = skb->decrypted;
 979		is_encrypted = !is_decrypted;
 980	} else {
 981		is_decrypted = 0;
 982		is_encrypted = 0;
 983	}
 984
 985	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
 986				   tls_ctx->rx.rec_seq, rxm->full_len,
 987				   is_encrypted, is_decrypted);
 988
 989	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
 990		if (likely(is_encrypted || is_decrypted))
 991			return is_decrypted;
 992
 993		/* After tls_device_down disables the offload, the next SKB will
 994		 * likely have initial fragments decrypted, and final ones not
 995		 * decrypted. We need to reencrypt that single SKB.
 996		 */
 997		return tls_device_reencrypt(sk, tls_ctx);
 998	}
 999
1000	/* Return immediately if the record is either entirely plaintext or
 1001	 * entirely ciphertext. Otherwise, reencrypt the partially decrypted
1002	 * record.
1003	 */
1004	if (is_decrypted) {
1005		ctx->resync_nh_reset = 1;
1006		return is_decrypted;
1007	}
1008	if (is_encrypted) {
1009		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
1010		return 0;
1011	}
1012
1013	ctx->resync_nh_reset = 1;
1014	return tls_device_reencrypt(sk, tls_ctx);
1015}
1016
1017static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
1018			      struct net_device *netdev)
1019{
1020	if (sk->sk_destruct != tls_device_sk_destruct) {
1021		refcount_set(&ctx->refcount, 1);
1022		dev_hold(netdev);
1023		RCU_INIT_POINTER(ctx->netdev, netdev);
1024		spin_lock_irq(&tls_device_lock);
1025		list_add_tail(&ctx->list, &tls_device_list);
1026		spin_unlock_irq(&tls_device_lock);
1027
1028		ctx->sk_destruct = sk->sk_destruct;
1029		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
1030	}
1031}
1032
1033static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *ctx)
1034{
1035	struct tls_offload_context_tx *offload_ctx;
1036	__be64 rcd_sn;
1037
1038	offload_ctx = kzalloc(sizeof(*offload_ctx), GFP_KERNEL);
1039	if (!offload_ctx)
1040		return NULL;
1041
1042	INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task);
1043	INIT_LIST_HEAD(&offload_ctx->records_list);
1044	spin_lock_init(&offload_ctx->lock);
1045	sg_init_table(offload_ctx->sg_tx_data,
1046		      ARRAY_SIZE(offload_ctx->sg_tx_data));
1047
1048	/* start at rec_seq - 1 to account for the start marker record */
1049	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
1050	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;
1051
1052	offload_ctx->ctx = ctx;
1053
1054	return offload_ctx;
1055}
1056
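/* Set up TX device offload for @sk: validate the netdev and cipher, allocate
 * the offload context and the start-marker record, initialize the software
 * fallback, fence the TCP write queue, and register the connection with the
 * driver through tls_dev_add().
 */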
1057int tls_set_device_offload(struct sock *sk)
1058{
1059	struct tls_record_info *start_marker_record;
1060	struct tls_offload_context_tx *offload_ctx;
1061	const struct tls_cipher_desc *cipher_desc;
1062	struct tls_crypto_info *crypto_info;
1063	struct tls_prot_info *prot;
1064	struct net_device *netdev;
1065	struct tls_context *ctx;
1066	char *iv, *rec_seq;
1067	int rc;
1068
1069	ctx = tls_get_ctx(sk);
1070	prot = &ctx->prot_info;
1071
1072	if (ctx->priv_ctx_tx)
1073		return -EEXIST;
1074
1075	netdev = get_netdev_for_sock(sk);
1076	if (!netdev) {
1077		pr_err_ratelimited("%s: netdev not found\n", __func__);
1078		return -EINVAL;
1079	}
1080
1081	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
1082		rc = -EOPNOTSUPP;
1083		goto release_netdev;
1084	}
1085
1086	crypto_info = &ctx->crypto_send.info;
1087	if (crypto_info->version != TLS_1_2_VERSION) {
1088		rc = -EOPNOTSUPP;
1089		goto release_netdev;
1090	}
1091
1092	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
1093	if (!cipher_desc || !cipher_desc->offloadable) {
1094		rc = -EINVAL;
1095		goto release_netdev;
1096	}
1097
1098	rc = init_prot_info(prot, crypto_info, cipher_desc);
1099	if (rc)
1100		goto release_netdev;
1101
1102	iv = crypto_info_iv(crypto_info, cipher_desc);
1103	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
1104
1105	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
1106	memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);
1107
1108	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
1109	if (!start_marker_record) {
1110		rc = -ENOMEM;
1111		goto release_netdev;
1112	}
1113
1114	offload_ctx = alloc_offload_ctx_tx(ctx);
1115	if (!offload_ctx) {
1116		rc = -ENOMEM;
1117		goto free_marker_record;
1118	}
1119
1120	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
1121	if (rc)
1122		goto free_offload_ctx;
1123
1124	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
1125	start_marker_record->len = 0;
1126	start_marker_record->num_frags = 0;
1127	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
1128
1129	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
1130	ctx->push_pending_record = tls_device_push_pending_record;
1131
1132	/* TLS offload is greatly simplified if we don't send
1133	 * SKBs where only part of the payload needs to be encrypted.
1134	 * So mark the last skb in the write queue as end of record.
1135	 */
1136	tcp_write_collapse_fence(sk);
1137
1138	/* Avoid offloading if the device is down
1139	 * We don't want to offload new flows after
1140	 * the NETDEV_DOWN event
1141	 *
 1142	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1143	 * handler thus protecting from the device going down before
1144	 * ctx was added to tls_device_list.
1145	 */
1146	down_read(&device_offload_lock);
1147	if (!(netdev->flags & IFF_UP)) {
1148		rc = -EINVAL;
1149		goto release_lock;
1150	}
1151
1152	ctx->priv_ctx_tx = offload_ctx;
1153	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
1154					     &ctx->crypto_send.info,
1155					     tcp_sk(sk)->write_seq);
1156	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
1157				     tcp_sk(sk)->write_seq, rec_seq, rc);
1158	if (rc)
1159		goto release_lock;
1160
1161	tls_device_attach(ctx, sk, netdev);
1162	up_read(&device_offload_lock);
1163
1164	/* following this assignment tls_is_skb_tx_device_offloaded
1165	 * will return true and the context might be accessed
1166	 * by the netdev's xmit function.
1167	 */
1168	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
1169	dev_put(netdev);
1170
1171	return 0;
1172
1173release_lock:
1174	up_read(&device_offload_lock);
1175	clean_acked_data_disable(inet_csk(sk));
1176	crypto_free_aead(offload_ctx->aead_send);
1177free_offload_ctx:
1178	kfree(offload_ctx);
1179	ctx->priv_ctx_tx = NULL;
1180free_marker_record:
1181	kfree(start_marker_record);
1182release_netdev:
1183	dev_put(netdev);
1184	return rc;
1185}
1186
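/* Set up RX device offload: same netdev checks as the TX path, plus the
 * software RX context (tls_set_sw_offload()) used for records the device
 * does not decrypt.
 */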
1187int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
1188{
1189	struct tls12_crypto_info_aes_gcm_128 *info;
1190	struct tls_offload_context_rx *context;
1191	struct net_device *netdev;
1192	int rc = 0;
1193
1194	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
1195		return -EOPNOTSUPP;
1196
1197	netdev = get_netdev_for_sock(sk);
1198	if (!netdev) {
1199		pr_err_ratelimited("%s: netdev not found\n", __func__);
1200		return -EINVAL;
1201	}
1202
1203	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
1204		rc = -EOPNOTSUPP;
1205		goto release_netdev;
1206	}
1207
1208	/* Avoid offloading if the device is down
1209	 * We don't want to offload new flows after
1210	 * the NETDEV_DOWN event
1211	 *
 1212	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
1213	 * handler thus protecting from the device going down before
1214	 * ctx was added to tls_device_list.
1215	 */
1216	down_read(&device_offload_lock);
1217	if (!(netdev->flags & IFF_UP)) {
1218		rc = -EINVAL;
1219		goto release_lock;
1220	}
1221
1222	context = kzalloc(sizeof(*context), GFP_KERNEL);
1223	if (!context) {
1224		rc = -ENOMEM;
1225		goto release_lock;
1226	}
1227	context->resync_nh_reset = 1;
1228
1229	ctx->priv_ctx_rx = context;
1230	rc = tls_set_sw_offload(sk, 0);
1231	if (rc)
1232		goto release_ctx;
1233
1234	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
1235					     &ctx->crypto_recv.info,
1236					     tcp_sk(sk)->copied_seq);
1237	info = (void *)&ctx->crypto_recv.info;
1238	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
1239				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
1240	if (rc)
1241		goto free_sw_resources;
1242
1243	tls_device_attach(ctx, sk, netdev);
1244	up_read(&device_offload_lock);
1245
1246	dev_put(netdev);
1247
1248	return 0;
1249
1250free_sw_resources:
1251	up_read(&device_offload_lock);
1252	tls_sw_free_resources_rx(sk);
1253	down_read(&device_offload_lock);
1254release_ctx:
1255	ctx->priv_ctx_rx = NULL;
1256release_lock:
1257	up_read(&device_offload_lock);
1258release_netdev:
1259	dev_put(netdev);
1260	return rc;
1261}
1262
1263void tls_device_offload_cleanup_rx(struct sock *sk)
1264{
1265	struct tls_context *tls_ctx = tls_get_ctx(sk);
1266	struct net_device *netdev;
1267
1268	down_read(&device_offload_lock);
1269	netdev = rcu_dereference_protected(tls_ctx->netdev,
1270					   lockdep_is_held(&device_offload_lock));
1271	if (!netdev)
1272		goto out;
1273
1274	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
1275					TLS_OFFLOAD_CTX_DIR_RX);
1276
1277	if (tls_ctx->tx_conf != TLS_HW) {
1278		dev_put(netdev);
1279		rcu_assign_pointer(tls_ctx->netdev, NULL);
1280	} else {
1281		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
1282	}
1283out:
1284	up_read(&device_offload_lock);
1285	tls_sw_release_resources_rx(sk);
1286}
1287
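/* NETDEV_DOWN handler: detach every TLS context bound to @netdev, switch TX
 * to the software fallback, stop RX/TX resync and release the driver-side
 * state, while keeping the sockets usable in degraded (software) mode.
 */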
1288static int tls_device_down(struct net_device *netdev)
1289{
1290	struct tls_context *ctx, *tmp;
1291	unsigned long flags;
1292	LIST_HEAD(list);
1293
1294	/* Request a write lock to block new offload attempts */
1295	down_write(&device_offload_lock);
1296
1297	spin_lock_irqsave(&tls_device_lock, flags);
1298	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
1299		struct net_device *ctx_netdev =
1300			rcu_dereference_protected(ctx->netdev,
1301						  lockdep_is_held(&device_offload_lock));
1302
1303		if (ctx_netdev != netdev ||
1304		    !refcount_inc_not_zero(&ctx->refcount))
1305			continue;
1306
1307		list_move(&ctx->list, &list);
1308	}
1309	spin_unlock_irqrestore(&tls_device_lock, flags);
1310
1311	list_for_each_entry_safe(ctx, tmp, &list, list)	{
1312		/* Stop offloaded TX and switch to the fallback.
1313		 * tls_is_skb_tx_device_offloaded will return false.
1314		 */
1315		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
1316
1317		/* Stop the RX and TX resync.
1318		 * tls_dev_resync must not be called after tls_dev_del.
1319		 */
1320		rcu_assign_pointer(ctx->netdev, NULL);
1321
1322		/* Start skipping the RX resync logic completely. */
1323		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
1324
1325		/* Sync with inflight packets. After this point:
1326		 * TX: no non-encrypted packets will be passed to the driver.
1327		 * RX: resync requests from the driver will be ignored.
1328		 */
1329		synchronize_net();
1330
1331		/* Release the offload context on the driver side. */
1332		if (ctx->tx_conf == TLS_HW)
1333			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1334							TLS_OFFLOAD_CTX_DIR_TX);
1335		if (ctx->rx_conf == TLS_HW &&
1336		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
1337			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
1338							TLS_OFFLOAD_CTX_DIR_RX);
1339
1340		dev_put(netdev);
1341
1342		/* Move the context to a separate list for two reasons:
1343		 * 1. When the context is deallocated, list_del is called.
1344		 * 2. It's no longer an offloaded context, so we don't want to
1345		 *    run offload-specific code on this context.
1346		 */
1347		spin_lock_irqsave(&tls_device_lock, flags);
1348		list_move_tail(&ctx->list, &tls_device_down_list);
1349		spin_unlock_irqrestore(&tls_device_lock, flags);
1350
 1351		/* Device contexts for RX and TX will be freed on sk_destruct
1352		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
1353		 * Now release the ref taken above.
1354		 */
1355		if (refcount_dec_and_test(&ctx->refcount)) {
1356			/* sk_destruct ran after tls_device_down took a ref, and
1357			 * it returned early. Complete the destruction here.
1358			 */
1359			list_del(&ctx->list);
1360			tls_device_free_ctx(ctx);
1361		}
1362	}
1363
1364	up_write(&device_offload_lock);
1365
1366	flush_workqueue(destruct_wq);
1367
1368	return NOTIFY_DONE;
1369}
1370
1371static int tls_dev_event(struct notifier_block *this, unsigned long event,
1372			 void *ptr)
1373{
1374	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1375
1376	if (!dev->tlsdev_ops &&
1377	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
1378		return NOTIFY_DONE;
1379
1380	switch (event) {
1381	case NETDEV_REGISTER:
1382	case NETDEV_FEAT_CHANGE:
1383		if (netif_is_bond_master(dev))
1384			return NOTIFY_DONE;
1385		if ((dev->features & NETIF_F_HW_TLS_RX) &&
1386		    !dev->tlsdev_ops->tls_dev_resync)
1387			return NOTIFY_BAD;
1388
1389		if  (dev->tlsdev_ops &&
1390		     dev->tlsdev_ops->tls_dev_add &&
1391		     dev->tlsdev_ops->tls_dev_del)
1392			return NOTIFY_DONE;
1393		else
1394			return NOTIFY_BAD;
1395	case NETDEV_DOWN:
1396		return tls_device_down(dev);
1397	}
1398	return NOTIFY_DONE;
1399}
1400
1401static struct notifier_block tls_dev_notifier = {
1402	.notifier_call	= tls_dev_event,
1403};
1404
1405int __init tls_device_init(void)
1406{
1407	int err;
1408
1409	dummy_page = alloc_page(GFP_KERNEL);
1410	if (!dummy_page)
1411		return -ENOMEM;
1412
1413	destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0);
1414	if (!destruct_wq) {
1415		err = -ENOMEM;
1416		goto err_free_dummy;
1417	}
1418
1419	err = register_netdevice_notifier(&tls_dev_notifier);
1420	if (err)
1421		goto err_destroy_wq;
1422
1423	return 0;
1424
1425err_destroy_wq:
1426	destroy_workqueue(destruct_wq);
1427err_free_dummy:
1428	put_page(dummy_page);
1429	return err;
1430}
1431
1432void __exit tls_device_cleanup(void)
1433{
1434	unregister_netdevice_notifier(&tls_dev_notifier);
1435	destroy_workqueue(destruct_wq);
1436	clean_acked_data_flush();
1437	put_page(dummy_page);
1438}