net/rds/iw.c (Linux v3.5.6)
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "rds.h"
#include "iw.h"

unsigned int fastreg_pool_size = RDS_FASTREG_POOL_SIZE;
unsigned int fastreg_message_size = RDS_FASTREG_SIZE + 1; /* +1 allows for unaligned MRs */

module_param(fastreg_pool_size, int, 0444);
MODULE_PARM_DESC(fastreg_pool_size, " Max number of fastreg MRs per device");
module_param(fastreg_message_size, int, 0444);
MODULE_PARM_DESC(fastreg_message_size, " Max size of a RDMA transfer (fastreg MRs)");

struct list_head rds_iw_devices;

/* NOTE: if also grabbing iwdev lock, grab this first */
DEFINE_SPINLOCK(iw_nodev_conns_lock);
LIST_HEAD(iw_nodev_conns);

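/*
 * ib_client "add" callback, invoked once for each RNIC the RDMA core
 * reports.  Allocates the per-device state: a protection domain, an
 * all-access DMA MR when the device has no local DMA lkey, and the
 * fastreg MR pool, then attaches it to the device as client data.
 */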
static void rds_iw_add_one(struct ib_device *device)
{
	struct rds_iw_device *rds_iwdev;
	struct ib_device_attr *dev_attr;

	/* Only handle iwarp devices */
	if (device->node_type != RDMA_NODE_RNIC)
		return;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		rdsdebug("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	rds_iwdev = kmalloc(sizeof *rds_iwdev, GFP_KERNEL);
	if (!rds_iwdev)
		goto free_attr;

	spin_lock_init(&rds_iwdev->spinlock);

	rds_iwdev->dma_local_lkey = !!(dev_attr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY);
	rds_iwdev->max_wrs = dev_attr->max_qp_wr;
	rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE);

	rds_iwdev->dev = device;
	rds_iwdev->pd = ib_alloc_pd(device);
	if (IS_ERR(rds_iwdev->pd))
		goto free_dev;

	if (!rds_iwdev->dma_local_lkey) {
		rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
					IB_ACCESS_REMOTE_READ |
					IB_ACCESS_REMOTE_WRITE |
					IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rds_iwdev->mr))
			goto err_pd;
	} else
		rds_iwdev->mr = NULL;

	rds_iwdev->mr_pool = rds_iw_create_mr_pool(rds_iwdev);
	if (IS_ERR(rds_iwdev->mr_pool)) {
		rds_iwdev->mr_pool = NULL;
		goto err_mr;
	}

	INIT_LIST_HEAD(&rds_iwdev->cm_id_list);
	INIT_LIST_HEAD(&rds_iwdev->conn_list);
	list_add_tail(&rds_iwdev->list, &rds_iw_devices);

	ib_set_client_data(device, &rds_iw_client, rds_iwdev);

	goto free_attr;

err_mr:
	if (rds_iwdev->mr)
		ib_dereg_mr(rds_iwdev->mr);
err_pd:
	ib_dealloc_pd(rds_iwdev->pd);
free_dev:
	kfree(rds_iwdev);
free_attr:
	kfree(dev_attr);
}

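/*
 * ib_client "remove" callback, the inverse of rds_iw_add_one(): frees the
 * cached cm_id list, shuts down the device's connections, destroys the MR
 * pool and DMA MR, and retries ib_dealloc_pd() until it succeeds.
 */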
static void rds_iw_remove_one(struct ib_device *device)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_cm_id *i_cm_id, *next;

	rds_iwdev = ib_get_client_data(device, &rds_iw_client);
	if (!rds_iwdev)
		return;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry_safe(i_cm_id, next, &rds_iwdev->cm_id_list, list) {
		list_del(&i_cm_id->list);
		kfree(i_cm_id);
	}
	spin_unlock_irq(&rds_iwdev->spinlock);

	rds_iw_destroy_conns(rds_iwdev);

	if (rds_iwdev->mr_pool)
		rds_iw_destroy_mr_pool(rds_iwdev->mr_pool);

	if (rds_iwdev->mr)
		ib_dereg_mr(rds_iwdev->mr);

	while (ib_dealloc_pd(rds_iwdev->pd)) {
		rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd);
		msleep(1);
	}

	list_del(&rds_iwdev->list);
	kfree(rds_iwdev);
}

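/* Registered with the RDMA core in rds_iw_init(). */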
struct ib_client rds_iw_client = {
	.name   = "rds_iw",
	.add    = rds_iw_add_one,
	.remove = rds_iw_remove_one
};

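/*
 * Visitor for the RDS_INFO_IWARP_CONNECTIONS info source: fills the
 * caller-supplied struct rds_info_rdma_connection for one connection.
 * Returns 0 to skip connections that belong to another transport.
 */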
static int rds_iw_conn_info_visitor(struct rds_connection *conn,
				    void *buffer)
{
	struct rds_info_rdma_connection *iinfo = buffer;
	struct rds_iw_connection *ic;

	/* We will only ever look at iWARP transports */
	if (conn->c_trans != &rds_iw_transport)
		return 0;

	iinfo->src_addr = conn->c_laddr;
	iinfo->dst_addr = conn->c_faddr;

	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
	if (rds_conn_state(conn) == RDS_CONN_UP) {
		struct rds_iw_device *rds_iwdev;
		struct rdma_dev_addr *dev_addr;

		ic = conn->c_transport_data;
		dev_addr = &ic->i_cm_id->route.addr.dev_addr;

		rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
		rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);

		rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
		iinfo->max_send_wr = ic->i_send_ring.w_nr;
		iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
		iinfo->max_send_sge = rds_iwdev->max_sge;
		rds_iw_get_mr_info(rds_iwdev, iinfo);
	}
	return 1;
}

static void rds_iw_ic_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_iw_conn_info_visitor,
				sizeof(struct rds_info_rdma_connection));
}


/*
 * Early RDS/IB was built to only bind to an address if there is an IPoIB
 * device with that address set.
 *
 * If it were me, I'd advocate for something more flexible.  Sending and
 * receiving should be device-agnostic.  Transports would try and maintain
 * connections between peers who have messages queued.  Userspace would be
 * allowed to influence which paths have priority.  We could call userspace
 * asserting this policy "routing".
 */
static int rds_iw_laddr_check(__be32 addr)
{
	int ret;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;

	/* Create a CMA ID and try to bind it. This catches both
	 * IB and iWARP capable NICs.
	 */
	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr will only succeed for IB & iWARP devices */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* due to this, we will claim to support IB devices unless we
	   check node_type. */
	if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
		ret = -EADDRNOTAVAIL;

	rdsdebug("addr %pI4 ret %d node type %d\n",
		&addr, ret,
		cm_id->device ? cm_id->device->node_type : -1);

	rdma_destroy_id(cm_id);

	return ret;
}

void rds_iw_exit(void)
{
	rds_info_deregister_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
	rds_iw_destroy_nodev_conns();
	ib_unregister_client(&rds_iw_client);
	rds_iw_sysctl_exit();
	rds_iw_recv_exit();
	rds_trans_unregister(&rds_iw_transport);
}

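/* Transport operations exported to the RDS core. */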
struct rds_transport rds_iw_transport = {
	.laddr_check		= rds_iw_laddr_check,
	.xmit_complete		= rds_iw_xmit_complete,
	.xmit			= rds_iw_xmit,
	.xmit_rdma		= rds_iw_xmit_rdma,
	.recv			= rds_iw_recv,
	.conn_alloc		= rds_iw_conn_alloc,
	.conn_free		= rds_iw_conn_free,
	.conn_connect		= rds_iw_conn_connect,
	.conn_shutdown		= rds_iw_conn_shutdown,
	.inc_copy_to_user	= rds_iw_inc_copy_to_user,
	.inc_free		= rds_iw_inc_free,
	.cm_initiate_connect	= rds_iw_cm_initiate_connect,
	.cm_handle_connect	= rds_iw_cm_handle_connect,
	.cm_connect_complete	= rds_iw_cm_connect_complete,
	.stats_info_copy	= rds_iw_stats_info_copy,
	.exit			= rds_iw_exit,
	.get_mr			= rds_iw_get_mr,
	.sync_mr		= rds_iw_sync_mr,
	.free_mr		= rds_iw_free_mr,
	.flush_mrs		= rds_iw_flush_mrs,
	.t_owner		= THIS_MODULE,
	.t_name			= "iwarp",
	.t_type			= RDS_TRANS_IWARP,
	.t_prefer_loopback	= 1,
};

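/*
 * Register the ib_client, sysctls, receive path and finally the transport,
 * unwinding in the reverse order if any step fails.
 */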
int rds_iw_init(void)
{
	int ret;

	INIT_LIST_HEAD(&rds_iw_devices);

	ret = ib_register_client(&rds_iw_client);
	if (ret)
		goto out;

	ret = rds_iw_sysctl_init();
	if (ret)
		goto out_ibreg;

	ret = rds_iw_recv_init();
	if (ret)
		goto out_sysctl;

	ret = rds_trans_register(&rds_iw_transport);
	if (ret)
		goto out_recv;

	rds_info_register_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);

	goto out;

out_recv:
	rds_iw_recv_exit();
out_sysctl:
	rds_iw_sysctl_exit();
out_ibreg:
	ib_unregister_client(&rds_iw_client);
out:
	return ret;
}

MODULE_LICENSE("GPL");

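rds_iw_init() and rds_iw_exit() are not wired to module_init()/module_exit() in this file; they are exported for the RDS RDMA glue to call when the iWARP transport is brought up or torn down. A minimal sketch of how a caller might drive them is shown below; the wrapper names are illustrative and are not part of this file.

/*
 * Illustrative only: a hypothetical module wrapper around rds_iw_init()
 * and rds_iw_exit().  An init failure propagates to the module loader;
 * exit undoes the registrations in reverse order.
 */
#include <linux/module.h>

int rds_iw_init(void);
void rds_iw_exit(void);

static int __init rds_iw_example_init(void)
{
	return rds_iw_init();
}

static void __exit rds_iw_example_exit(void)
{
	rds_iw_exit();
}

module_init(rds_iw_example_init);
module_exit(rds_iw_example_exit);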