Linux Audio

Check our new training course

Loading...
v4.17
 
  1/* Service connection management
  2 *
  3 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
  4 * Written by David Howells (dhowells@redhat.com)
  5 *
  6 * This program is free software; you can redistribute it and/or
  7 * modify it under the terms of the GNU General Public Licence
  8 * as published by the Free Software Foundation; either version
  9 * 2 of the Licence, or (at your option) any later version.
 10 */
 11
 12#include <linux/slab.h>
 13#include "ar-internal.h"
 14
 15/*
 16 * Find a service connection under RCU conditions.
 17 *
 18 * We could use a hash table, but that is subject to bucket stuffing by an
 19 * attacker as the client gets to pick the epoch and cid values and would know
 20 * the hash function.  So, instead, we use a hash table for the peer and from
 21 * that an rbtree to find the service connection.  Under ordinary circumstances
 22 * it might be slower than a large hash table, but it is at least limited in
 23 * depth.
 24 */
 25struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
 26						     struct sk_buff *skb)
 27{
 28	struct rxrpc_connection *conn = NULL;
 29	struct rxrpc_conn_proto k;
 30	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 31	struct rb_node *p;
 32	unsigned int seq = 0;
 33
 34	k.epoch	= sp->hdr.epoch;
 35	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
 36
 37	do {
 38		/* Unfortunately, rbtree walking doesn't give reliable results
 39		 * under just the RCU read lock, so we have to check for
 40		 * changes.
 41		 */
 
 42		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
 43
 44		p = rcu_dereference_raw(peer->service_conns.rb_node);
 45		while (p) {
 46			conn = rb_entry(p, struct rxrpc_connection, service_node);
 47
 48			if (conn->proto.index_key < k.index_key)
 49				p = rcu_dereference_raw(p->rb_left);
 50			else if (conn->proto.index_key > k.index_key)
 51				p = rcu_dereference_raw(p->rb_right);
 52			else
 53				break;
 54			conn = NULL;
 55		}
 56	} while (need_seqretry(&peer->service_conn_lock, seq));
 57
 58	done_seqretry(&peer->service_conn_lock, seq);
 59	_leave(" = %d", conn ? conn->debug_id : -1);
 60	return conn;
 61}
 62
/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 *
 * The tree is keyed on proto.index_key (epoch + cid).  If a defunct
 * connection with the same key is already present, it is displaced in place.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	/* Writers serialise on the seqlock; readers in
	 * rxrpc_find_service_conn_rcu() detect concurrent modification via
	 * the sequence count.  _bh because lookups run in BH context.
	 */
	write_seqlock_bh(&peer->service_conn_lock);

	/* Standard rbtree descent to find the insertion point. */
	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* _rcu linkage so lockless readers never see a half-initialised
	 * node.
	 */
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	/* A zero usage count means the extant conn is dead (awaiting reap)
	 * and may be replaced in situ.
	 */
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}
118
119/*
120 * Preallocate a service connection.  The connection is placed on the proc and
121 * reap lists so that we don't have to get the lock from BH context.
122 */
123struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
124							   gfp_t gfp)
125{
126	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
127
128	if (conn) {
129		/* We maintain an extra ref on the connection whilst it is on
130		 * the rxrpc_connections list.
131		 */
132		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
133		atomic_set(&conn->usage, 2);
134
135		atomic_inc(&rxnet->nr_conns);
136		write_lock(&rxnet->conn_lock);
137		list_add_tail(&conn->link, &rxnet->service_conns);
138		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
139		write_unlock(&rxnet->conn_lock);
140
141		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
142				 atomic_read(&conn->usage),
143				 __builtin_return_address(0));
144	}
145
146	return conn;
147}
148
149/*
150 * Set up an incoming connection.  This is called in BH context with the RCU
151 * read lock held.
152 */
153void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
154				   struct rxrpc_connection *conn,
 
155				   struct sk_buff *skb)
156{
157	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
158
159	_enter("");
160
161	conn->proto.epoch	= sp->hdr.epoch;
162	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
163	conn->params.service_id	= sp->hdr.serviceId;
164	conn->service_id	= sp->hdr.serviceId;
165	conn->security_ix	= sp->hdr.securityIndex;
166	conn->out_clientflag	= 0;
 
167	if (conn->security_ix)
168		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
169	else
170		conn->state	= RXRPC_CONN_SERVICE;
171
172	/* See if we should upgrade the service.  This can only happen on the
173	 * first packet on a new connection.  Once done, it applies to all
174	 * subsequent calls on that connection.
175	 */
176	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
177	    conn->service_id == rx->service_upgrade.from)
178		conn->service_id = rx->service_upgrade.to;
179
180	/* Make the connection a target for incoming packets. */
181	rxrpc_publish_service_conn(conn->params.peer, conn);
182
183	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
 
184}
185
/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
	/* The flag check makes this idempotent: only erase if this conn is
	 * still in the tree (it may already have been displaced by
	 * rxrpc_publish_service_conn(), which clears the bit).
	 */
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* Service connection management
  3 *
  4 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
 
 
 
 
 
  6 */
  7
  8#include <linux/slab.h>
  9#include "ar-internal.h"
 10
/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 *
 * Returns the matching connection or NULL; no reference is taken.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	/* Start at 1 so the increment below yields an even value (lockless
	 * pass) first time round, and an odd value (take the lock) on any
	 * retry, guaranteeing forward progress against writers.
	 */
	unsigned int seq = 1;

	/* Lookup key: epoch plus masked connection ID from the header. */
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		seq++; /* 2 on the 1st/lockless path, otherwise odd */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			conn = NULL;	/* Not this node; NULL if we fall off */
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}
 59
/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 *
 * The tree is keyed on proto.index_key (epoch + cid).  If a defunct
 * connection with the same key is already present, it is displaced in place.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	/* Writers serialise on the seqlock; readers in
	 * rxrpc_find_service_conn_rcu() detect concurrent modification via
	 * the sequence count.
	 */
	write_seqlock(&peer->service_conn_lock);

	/* Standard rbtree descent to find the insertion point. */
	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* _rcu linkage so lockless readers never see a half-initialised
	 * node.
	 */
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	/* A zero refcount means the extant conn is dead (awaiting reap) and
	 * may be replaced in situ.
	 */
	if (refcount_read(&cursor->ref) == 0)
		goto replace_old_connection;
	write_sequnlock(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}
115
116/*
117 * Preallocate a service connection.  The connection is placed on the proc and
118 * reap lists so that we don't have to get the lock from BH context.
119 */
120struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
121							   gfp_t gfp)
122{
123	struct rxrpc_connection *conn = rxrpc_alloc_connection(rxnet, gfp);
124
125	if (conn) {
126		/* We maintain an extra ref on the connection whilst it is on
127		 * the rxrpc_connections list.
128		 */
129		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
130		refcount_set(&conn->ref, 2);
131
132		atomic_inc(&rxnet->nr_conns);
133		write_lock(&rxnet->conn_lock);
134		list_add_tail(&conn->link, &rxnet->service_conns);
135		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
136		write_unlock(&rxnet->conn_lock);
137
138		rxrpc_see_connection(conn, rxrpc_conn_new_service);
 
 
139	}
140
141	return conn;
142}
143
144/*
145 * Set up an incoming connection.  This is called in BH context with the RCU
146 * read lock held.
147 */
148void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
149				   struct rxrpc_connection *conn,
150				   const struct rxrpc_security *sec,
151				   struct sk_buff *skb)
152{
153	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
154
155	_enter("");
156
157	conn->proto.epoch	= sp->hdr.epoch;
158	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
159	conn->orig_service_id	= sp->hdr.serviceId;
160	conn->service_id	= sp->hdr.serviceId;
161	conn->security_ix	= sp->hdr.securityIndex;
162	conn->out_clientflag	= 0;
163	conn->security		= sec;
164	if (conn->security_ix)
165		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
166	else
167		conn->state	= RXRPC_CONN_SERVICE;
168
169	/* See if we should upgrade the service.  This can only happen on the
170	 * first packet on a new connection.  Once done, it applies to all
171	 * subsequent calls on that connection.
172	 */
173	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
174	    conn->service_id == rx->service_upgrade.from)
175		conn->service_id = rx->service_upgrade.to;
176
177	atomic_set(&conn->active, 1);
 
178
179	/* Make the connection a target for incoming packets. */
180	rxrpc_publish_service_conn(conn->peer, conn);
181}
182
/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->peer;

	write_seqlock(&peer->service_conn_lock);
	/* The flag check makes this idempotent: only erase if this conn is
	 * still in the tree (it may already have been displaced by
	 * rxrpc_publish_service_conn(), which clears the bit).
	 */
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock(&peer->service_conn_lock);
}