v3.5.6 (net/rds/threads.c)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */
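/*
 * Illustrative sketch (not part of this file): a minimal module showing the
 * pattern the comment above relies on -- a single-threaded workqueue gives
 * every queued item process context and serializes the items against each
 * other.  All example_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* Runs in process context; may sleep, e.g. to block in an allocation. */
	pr_info("example work item ran\n");
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* waits for queued work to finish */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
/* --- end of illustrative sketch --- */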

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 *  ANY		  -> ERROR
 *  UP		  -> DISCONNECTING
 *  ERROR	  -> DISCONNECTING
 *  DISCONNECTING -> DOWN
 *  DOWN	  -> CONNECTING
 *  CONNECTING	  -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  -	Inside the shutdown worker; synchronizes with xmit path
 *	through RDS_IN_XMIT, and with connection management callbacks
 *	via c_cm_lock.
 *
 *	For receive callbacks, we rely on the underlying transport
 *	(TCP, IB/RDMA) to provide the necessary synchronisation.
 */
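/*
 * Illustrative sketch (not part of this file): the transition table above,
 * encoded as a small standalone C program.  The state names mirror the
 * RDS_CONN_* states, but everything else here is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum conn_state { DOWN, CONNECTING, UP, DISCONNECTING, ERROR };

/* Returns true if the documented life cycle allows old_state -> new_state. */
static bool transition_allowed(enum conn_state old_state,
			       enum conn_state new_state)
{
	if (new_state == ERROR)			/* ANY -> ERROR */
		return true;
	switch (old_state) {
	case UP:		return new_state == DISCONNECTING;
	case ERROR:		return new_state == DISCONNECTING;
	case DISCONNECTING:	return new_state == DOWN;
	case DOWN:		return new_state == CONNECTING;
	case CONNECTING:	return new_state == UP;
	}
	return false;
}

int main(void)
{
	/* the transition rds_connect_worker attempts before connecting ... */
	printf("DOWN -> CONNECTING: %d\n", transition_allowed(DOWN, CONNECTING));
	/* ... and a transition the table forbids */
	printf("DOWN -> UP:         %d\n", transition_allowed(DOWN, UP));
	return 0;
}
/* --- end of illustrative sketch --- */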
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);

void rds_connect_complete(struct rds_connection *conn)
{
	if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
		printk(KERN_WARNING "%s: Cannot transition to state UP, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
		atomic_set(&conn->c_state, RDS_CONN_ERROR);
		queue_work(rds_wq, &conn->c_down_w);
		return;
	}

	rdsdebug("conn %p for %pI4 to %pI4 complete\n",
	  conn, &conn->c_laddr, &conn->c_faddr);

	conn->c_reconnect_jiffies = 0;
	set_bit(0, &conn->c_map_queued);
	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);

/*
 * This random exponential backoff is relied on to eventually resolve racing
 * connects.
 *
 * If connect attempts race then both parties drop both connections and come
 * here to wait for a random amount of time before trying again.  Eventually
 * the backoff range will be so much greater than the time it takes to
 * establish a connection that one of the pair will establish the connection
 * before the other's random delay fires.
 *
 * Connection attempts that arrive while a connection is already established
 * are also considered to be racing connects.  This lets a connection from
 * a rebooted machine replace an existing stale connection before the transport
 * notices that the connection has failed.
 *
 * We should *always* start with a random backoff; otherwise a broken connection
 * will always take several iterations to be re-established.
 */
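/*
 * Illustrative sketch (not part of this file): how the reconnect delay in
 * rds_queue_reconnect() below grows, as a standalone userspace program.  The
 * RECONNECT_*_JIFFIES constants stand in for the rds_sysctl_reconnect_min/
 * max_jiffies sysctls; their values here are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RECONNECT_MIN_JIFFIES	100UL	/* hypothetical minimum ceiling */
#define RECONNECT_MAX_JIFFIES	1600UL	/* hypothetical maximum ceiling */

int main(void)
{
	unsigned long ceiling = 0;	/* c_reconnect_jiffies starts at 0 */
	int attempt;

	srand((unsigned int)time(NULL));

	for (attempt = 1; attempt <= 7; attempt++) {
		unsigned long delay;

		if (ceiling == 0) {
			/* first attempt after a drop: reconnect immediately */
			ceiling = RECONNECT_MIN_JIFFIES;
			printf("attempt %d: delay 0 (ceiling %lu)\n",
			       attempt, ceiling);
			continue;
		}

		/* later attempts: uniform random delay in [0, ceiling) */
		delay = (unsigned long)rand() % ceiling;
		printf("attempt %d: delay %lu (ceiling %lu)\n",
		       attempt, delay, ceiling);

		/* double the ceiling, clamped at the maximum */
		ceiling *= 2;
		if (ceiling > RECONNECT_MAX_JIFFIES)
			ceiling = RECONNECT_MAX_JIFFIES;
	}
	return 0;
}
/* --- end of illustrative sketch --- */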
void rds_queue_reconnect(struct rds_connection *conn)
{
	unsigned long rand;

	rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
	  conn, &conn->c_laddr, &conn->c_faddr,
	  conn->c_reconnect_jiffies);

	set_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
	if (conn->c_reconnect_jiffies == 0) {
		conn->c_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
		return;
	}

	get_random_bytes(&rand, sizeof(rand));
	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
		 rand % conn->c_reconnect_jiffies, conn->c_reconnect_jiffies,
		 conn, &conn->c_laddr, &conn->c_faddr);
	queue_delayed_work(rds_wq, &conn->c_conn_w,
			   rand % conn->c_reconnect_jiffies);

	conn->c_reconnect_jiffies = min(conn->c_reconnect_jiffies * 2,
					rds_sysctl_reconnect_max_jiffies);
}

void rds_connect_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_conn_w.work);
	int ret;

	clear_bit(RDS_RECONNECT_PENDING, &conn->c_flags);
	if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		ret = conn->c_trans->conn_connect(conn);
		rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
			conn, &conn->c_laddr, &conn->c_faddr, ret);

		if (ret) {
			if (rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_DOWN))
				rds_queue_reconnect(conn);
			else
				rds_conn_error(conn, "RDS: connect failed\n");
		}
	}
}

void rds_send_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
	int ret;

	if (rds_conn_state(conn) == RDS_CONN_UP) {
		ret = rds_send_xmit(conn);
		rdsdebug("conn %p ret %d\n", conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_send_immediate_retry);
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_send_delayed_retry);
			queue_delayed_work(rds_wq, &conn->c_send_w, 2);
		default:
			break;
		}
	}
}

void rds_recv_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_recv_w.work);
	int ret;

	if (rds_conn_state(conn) == RDS_CONN_UP) {
		ret = conn->c_trans->recv(conn);
		rdsdebug("conn %p ret %d\n", conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_recv_immediate_retry);
			queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_recv_delayed_retry);
			queue_delayed_work(rds_wq, &conn->c_recv_w, 2);
		default:
			break;
		}
	}
}

void rds_shutdown_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);

	rds_conn_shutdown(conn);
}

void rds_threads_exit(void)
{
	destroy_workqueue(rds_wq);
}

int rds_threads_init(void)
{
	rds_wq = create_singlethread_workqueue("krdsd");
	if (!rds_wq)
		return -ENOMEM;

	return 0;
}
v4.17 (net/rds/threads.c)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 *  ANY		  -> ERROR
 *  UP		  -> DISCONNECTING
 *  ERROR	  -> DISCONNECTING
 *  DISCONNECTING -> DOWN
 *  DOWN	  -> CONNECTING
 *  CONNECTING	  -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  -	Inside the shutdown worker; synchronizes with xmit path
 *	through RDS_IN_XMIT, and with connection management callbacks
 *	via c_cm_lock.
 *
 *	For receive callbacks, we rely on the underlying transport
 *	(TCP, IB/RDMA) to provide the necessary synchronisation.
 */
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);

void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
{
	if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
		printk(KERN_WARNING "%s: Cannot transition to state UP, "
				"current state is %d\n",
				__func__,
				atomic_read(&cp->cp_state));
		rds_conn_path_drop(cp, false);
		return;
	}

	rdsdebug("conn %p for %pI4 to %pI4 complete\n",
	  cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);

	cp->cp_reconnect_jiffies = 0;
	set_bit(0, &cp->cp_conn->c_map_queued);
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn)) {
		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);

void rds_connect_complete(struct rds_connection *conn)
{
	rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);

/*
 * This random exponential backoff is relied on to eventually resolve racing
 * connects.
 *
 * If connect attempts race then both parties drop both connections and come
 * here to wait for a random amount of time before trying again.  Eventually
 * the backoff range will be so much greater than the time it takes to
 * establish a connection that one of the pair will establish the connection
 * before the other's random delay fires.
 *
 * Connection attempts that arrive while a connection is already established
 * are also considered to be racing connects.  This lets a connection from
 * a rebooted machine replace an existing stale connection before the transport
 * notices that the connection has failed.
 *
 * We should *always* start with a random backoff; otherwise a broken connection
 * will always take several iterations to be re-established.
 */
void rds_queue_reconnect(struct rds_conn_path *cp)
{
	unsigned long rand;
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
	  conn, &conn->c_laddr, &conn->c_faddr,
	  cp->cp_reconnect_jiffies);

	/* let peer with smaller addr initiate reconnect, to avoid duels */
	if (conn->c_trans->t_type == RDS_TRANS_TCP &&
	    !IS_CANONICAL(conn->c_laddr, conn->c_faddr))
		return;

	set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (cp->cp_reconnect_jiffies == 0) {
		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
		rcu_read_lock();
		if (!rds_destroy_pending(cp->cp_conn))
			queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
		rcu_read_unlock();
		return;
	}

	get_random_bytes(&rand, sizeof(rand));
	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
		 conn, &conn->c_laddr, &conn->c_faddr);
	rcu_read_lock();
	if (!rds_destroy_pending(cp->cp_conn))
		queue_delayed_work(rds_wq, &cp->cp_conn_w,
				   rand % cp->cp_reconnect_jiffies);
	rcu_read_unlock();

	cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
					rds_sysctl_reconnect_max_jiffies);
}
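/*
 * Illustrative sketch (not part of this file): the "peer with the smaller
 * address initiates the reconnect" rule used above to avoid reconnect duels,
 * written as a standalone userspace program.  The real check is the
 * IS_CANONICAL() macro from the RDS headers; initiates_reconnect() below is
 * a hypothetical approximation for IPv4 addresses in network byte order.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>

static bool initiates_reconnect(in_addr_t laddr, in_addr_t faddr)
{
	/* only the end with the numerically smaller address reconnects */
	return ntohl(laddr) < ntohl(faddr);
}

int main(void)
{
	in_addr_t a = inet_addr("192.0.2.1");
	in_addr_t b = inet_addr("192.0.2.2");

	/* exactly one side of the pair initiates, so the duel cannot recur */
	printf("192.0.2.1 initiates: %d\n", initiates_reconnect(a, b));
	printf("192.0.2.2 initiates: %d\n", initiates_reconnect(b, a));
	return 0;
}
/* --- end of illustrative sketch --- */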

void rds_connect_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_conn_w.work);
	struct rds_connection *conn = cp->cp_conn;
	int ret;

	if (cp->cp_index > 0 &&
	    !IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr))
		return;
	clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	ret = rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
	if (ret) {
		ret = conn->c_trans->conn_path_connect(cp);
		rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
			conn, &conn->c_laddr, &conn->c_faddr, ret);

		if (ret) {
			if (rds_conn_path_transition(cp,
						     RDS_CONN_CONNECTING,
						     RDS_CONN_DOWN))
				rds_queue_reconnect(cp);
			else
				rds_conn_path_error(cp, "connect failed\n");
		}
	}
}

void rds_send_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_send_w.work);
	int ret;

	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
		ret = rds_send_xmit(cp);
		cond_resched();
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_send_immediate_retry);
			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_send_delayed_retry);
			queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
		default:
			break;
		}
	}
}

void rds_recv_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_recv_w.work);
	int ret;

	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		ret = cp->cp_conn->c_trans->recv_path(cp);
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_recv_immediate_retry);
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
			break;
		case -ENOMEM:
			rds_stats_inc(s_recv_delayed_retry);
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
		default:
			break;
		}
	}
}

void rds_shutdown_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_down_w);

	rds_conn_shutdown(cp);
}

void rds_threads_exit(void)
{
	destroy_workqueue(rds_wq);
}

int rds_threads_init(void)
{
	rds_wq = create_singlethread_workqueue("krdsd");
	if (!rds_wq)
		return -ENOMEM;

	return 0;
}