v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"

static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
	.ref		= REFCOUNT_INIT(1),
	.debug_id	= UINT_MAX,
};

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}
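
Editor's note: the retry loop above uses the kernel's read_seqbegin_or_lock()/need_seqretry()/done_seqretry() helpers. The first pass walks the tree locklessly under the sequence count; only if a writer interfered does the loop repeat with the lock held. A minimal sketch of that reader pattern, using a hypothetical seqlock-protected value rather than the rxrpc connection tree:

#include <linux/seqlock.h>

/* Hypothetical example data; not part of rxrpc. */
static DEFINE_SEQLOCK(example_lock);
static unsigned long example_value;

static unsigned long example_read(void)
{
	unsigned long val;
	int seq = 0;

	do {
		/* Pass 1: lockless read under the sequence count.
		 * Pass 2 (only if a writer raced with us): the lock is
		 * taken, so the read cannot be disturbed again.
		 */
		read_seqbegin_or_lock(&example_lock, &seq);
		val = example_value;
	} while (need_seqretry(&example_lock, seq));
	done_seqretry(&example_lock, seq);

	return val;
}
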

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (refcount_read(&cursor->ref) == 0)
		goto replace_old_connection;
	write_sequnlock(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}
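
Editor's note: this is the write side that pairs with the lockless lookup: all tree modifications happen under write_seqlock(&peer->service_conn_lock), and nodes are linked with the RCU-aware rbtree helpers (rb_link_node_rcu(), rb_replace_node_rcu()) so a concurrent reader never observes a half-linked node. Continuing the hypothetical sketch from above, the matching writer would be:

static void example_write(unsigned long val)
{
	/* Writers exclude each other and bump the sequence count,
	 * forcing any concurrent lockless reader in example_read()
	 * to retry.
	 */
	write_seqlock(&example_lock);
	example_value = val;
	write_sequnlock(&example_lock);
}
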

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(rxnet, gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		refcount_set(&conn->ref, 2);
		conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle,
						rxrpc_bundle_get_service_conn);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		rxrpc_see_connection(conn, rxrpc_conn_new_service);
	}

	return conn;
}
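
Editor's note: refcount_set(&conn->ref, 2) above is the common kernel idiom of taking one reference on behalf of the list the object is published on and one on behalf of the caller. A minimal, self-contained illustration of that idiom (hypothetical types; list locking omitted for brevity):

#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t		ref;
	struct list_head	link;
};

static struct example_obj *example_alloc_and_publish(struct list_head *list,
						     gfp_t gfp)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), gfp);

	if (obj) {
		/* One ref is owned by the list, one by the caller, so the
		 * object cannot go away while either can still reach it.
		 */
		refcount_set(&obj->ref, 2);
		list_add_tail(&obj->link, list);
	}
	return obj;
}
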

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   const struct rxrpc_security *sec,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->orig_service_id	= sp->hdr.serviceId;
	conn->service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	conn->security		= sec;
	if (conn->security_ix)
		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state	= RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service.  This can only happen on the
	 * first packet on a new connection.  Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;

	atomic_set(&conn->active, 1);

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->peer, conn);
}
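
Editor's note: the upgrade check above is the kernel side of AF_RXRPC's service upgrade: if the first packet carries RXRPC_USERSTATUS_SERVICE_UPGRADE and targets the "from" service, the whole connection is rebound to the "to" service. As a rough userspace sketch (an assumption based on the RXRPC_UPGRADEABLE_SERVICE socket option described in the AF_RXRPC documentation, not code from this file), a server might advertise the upgrade pair like this:

#include <sys/socket.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272	/* assumption: value taken from the kernel's socket.h */
#endif

/* Hypothetical helper: mark service `from` as upgradeable to service `to`
 * on a bound AF_RXRPC server socket.
 */
static int mark_service_upgradeable(int server_fd,
				    unsigned short from, unsigned short to)
{
	unsigned short service_ids[2] = { from, to };

	return setsockopt(server_fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
			  service_ids, sizeof(service_ids));
}
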

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->peer;

	write_seqlock(&peer->service_conn_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock(&peer->service_conn_lock);
}
v4.17
 
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/slab.h>
#include "ar-internal.h"

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		atomic_set(&conn->usage, 2);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id	= sp->hdr.serviceId;
	conn->service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	if (conn->security_ix)
		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state	= RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service.  This can only happen on the
	 * first packet on a new connection.  Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}