// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *	The connection is on the rxnet->waiting_client_conns list which is
 *	kept in to-be-granted order.  Culled conns with waiters go to the
 *	back of the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *	The connection is on the rxnet->active_client_conns list which is kept
 *	in activation order for culling purposes.
 *
 *	rxnet->nr_active_client_conns is also kept incremented whilst the
 *	connection is on this list.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *	The connection is on the rxnet->idle_client_conns list which is kept in
 *	order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call as soon as we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
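
/*
 * A rough sketch of the cache state transitions described above, as driven
 * by the code below (illustrative, not exhaustive):
 *
 *	INACTIVE/IDLE/CULLED -> ACTIVE or UPGRADE  [animated, capacity free]
 *	INACTIVE/IDLE/CULLED -> WAITING            [animated, at capacity]
 *	WAITING -> ACTIVE or UPGRADE               [capacity freed by a put]
 *	ACTIVE/UPGRADE -> CULLED or WAITING        [culled for capacity]
 *	UPGRADE -> ACTIVE                          [upgrade probe terminated]
 *	ACTIVE/CULLED -> IDLE or INACTIVE          [last call disconnected]
 *	IDLE -> INACTIVE                           [expired and reaped]
 */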

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
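
/* Illustration (not part of the original code): idr_alloc_cyclic() hands
 * out IDs from the range [1, 0x40000000), advancing a cursor each time so
 * that successive allocations stay clustered in the ID space.  The ID is
 * shifted left by RXRPC_CIDSHIFT to form the cid, leaving the low bits to
 * select a channel.  Assuming RXRPC_CIDSHIFT is 2 (four channels per
 * connection), an IDR ID of 5 would yield cid 0x14, with cids 0x14-0x17
 * addressing channels 0-3 of that connection.
 */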

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
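
/* Worked example, using the module defaults defined above: with
 * rxrpc_max_client_connections = 1000, limit = max(4000, 1024) = 4000, so
 * a connection whose ID lies more than 4000 slots from the IDR cursor is
 * marked DONT_REUSE and will be replaced rather than reused.
 */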

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
				 struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security = candidate->security;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security = candidate->security;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
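
/* Worked example (illustrative): assuming RXRPC_ACTIVE_CHANS_MASK is 0xf
 * (four channels) and active_chans is 0x5 (channels 0 and 2 in use),
 * avail = ~0x5 & 0xf = 0xa and __ffs(0xa) = 1, so channel 1 is handed to
 * the call at the head of waiting_calls.  The loop repeats until either
 * the waiting list or the set of free channels is exhausted.
 */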

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			switch (call->interruptibility) {
			case RXRPC_INTERRUPTIBLE:
			case RXRPC_PREINTERRUPTIBLE:
				set_current_state(TASK_INTERRUPTIBLE);
				break;
			case RXRPC_UNINTERRUPTIBLE:
			default:
				set_current_state(TASK_UNINTERRUPTIBLE);
				break;
			}
			if (call->call_id)
				break;
			if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
			     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
			    signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link,
			   &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	unsigned long now = jiffies;
	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

	if (rxnet->live)
		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
}
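
/* Note that timer_reduce() only modifies the timer if the new expiry is
 * earlier than the one already pending, so the reap timer can only be
 * pulled forward here, never pushed back.
 */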

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	unsigned int channel = -1;
	u32 cid;

	spin_lock(&conn->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	cid = call->cid;
	if (cid) {
		channel = cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];
	}
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&conn->channel_lock);
		BUG();
	}

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		fallthrough;
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			rxrpc_set_client_reap_timer(rxnet);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}
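
/* To summarise the above (illustrative): disconnection leaves the conn
 * ACTIVE (its channel passed straight to a waiting call), WAITING, CULLED
 * (other channels still busy), IDLE (exposed, awaiting the reaper) or
 * INACTIVE (never exposed and now off all lists).
 */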

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}
/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}

/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);
	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that the flag held on the usage count.  We deal with that
	 * here.  If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer,
			     conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_active;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);

			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
				BUG();
			conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
			list_move(&conn->cache_link, &graveyard);
			nr_active--;
		}
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);

		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}