v3.1
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	/* Pass NULL, don't need struct net for hash */
	unsigned long hash = inet_ehashfn(NULL,
					  be32_to_cpu(laddr), 0,
					  be32_to_cpu(faddr), 0);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
				conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
	  &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback than over either transport.
	 */
	loop_trans = rds_trans_get_preferred(faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
	  conn, &laddr, &faddr,
	  trans->t_name ? trans->t_name : "[unknown]",
	  is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

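/* Public wrappers around __rds_conn_create().  rds_conn_create_outgoing()
 * marks the connection as locally initiated (is_outgoing), which matters
 * for the loopback and passive-connection handling above. */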
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

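/* Copy either the send queue or the retransmit queue of every connection
 * into the info iterator.  'len' bounds how many rds_info_message entries
 * can be copied; the true total is reported back through 'lens'. */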
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

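/* Walk every hashed connection and let 'visitor' fill a record of item_len
 * bytes for it.  Every connection is counted in lens->nr even when the
 * caller's buffer is too small to copy them all, so the caller can retry
 * with a bigger buffer. */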
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct hlist_node *pos;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

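/* Fill one rds_info_connection record for the RDS_INFO_CONNECTIONS listing. */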
static int rds_conn_info_visitor(struct rds_connection *conn,
				  void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

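/* Create the connection slab cache and register the info handlers. */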
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

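/* Module teardown: every connection should have been destroyed by now,
 * so the connection hash is expected to be empty. */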
void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}