/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

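/*
 * iWARP has no native notion of address handles, multicast groups, or
 * MADs; those are InfiniBand constructs.  These verbs are stubbed out
 * to return -ENOSYS so the RDMA core can probe them safely.
 */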
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
			    u8 port_num, struct ib_wc *in_wc,
			    struct ib_grh *in_grh, struct ib_mad *in_mad,
			    struct ib_mad *out_mad)
{
	return -ENOSYS;
}

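/*
 * User context setup/teardown.  Each ucontext keeps a list of
 * c4iw_mm_entry records that pre-authorize mmap() regions handed out to
 * userspace; teardown frees any entries userspace never actually mapped.
 */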
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
	struct c4iw_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct c4iw_ucontext *context;
	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

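/*
 * mmap() dispatch.  The page offset encodes a bus/DMA address ("key")
 * that must match a c4iw_mm_entry previously queued on the ucontext by
 * the CQ/QP creation paths.  Depending on which PCI BAR the address
 * falls in, we map the MA_SYNC register (BAR0), user doorbell or
 * on-chip queue memory (BAR2), or plain contiguous DMA queue memory.
 * Each entry is single-use: it is removed and freed before mapping, so
 * a second mmap() of the same key fails with -EINVAL.
 */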
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
	    (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
		    pci_resource_len(rdev->lldi.pdev, 0)))) {

		/*
		 * MA_SYNC register...
		 */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
		   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
		    pci_resource_len(rdev->lldi.pdev, 2)))) {

		/*
		 * Map user DB or OCQP memory...
		 */
		if (addr >= rdev->oc_mw_pa)
			vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

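/*
 * Protection domain allocation.  PD ids come from a driver-managed
 * resource table; for user PDs the id is copied back to the library
 * through udata.  The stats mutex guards the current/maximum usage
 * counters the driver keeps for diagnostics.
 */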
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur--;
	mutex_unlock(&rhp->rdev.stats.lock);
	kfree(php);
	return 0;
}

static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct c4iw_pd *php;
	u32 pdid;
	struct c4iw_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct c4iw_dev *) ibdev;
	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
			c4iw_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	mutex_lock(&rhp->rdev.stats.lock);
	rhp->rdev.stats.pd.cur++;
	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
	mutex_unlock(&rhp->rdev.stats.lock);
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

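/*
 * iWARP has no partition keys, so a single all-zero pkey is reported.
 * Likewise there is no GID table in hardware; the GID is synthesized
 * from the MAC address of the netdev backing the queried port.
 */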
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			   u16 *pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	       __func__, ibdev, port, index, gid);
	dev = to_c4iw_dev(ibdev);
	BUG_ON(port == 0);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
	return 0;
}

static int c4iw_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct c4iw_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	props->hw_ver = dev->rdev.lldi.adapter_type;
	props->fw_ver = dev->rdev.lldi.fw_vers;
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = T4_PAGESIZE_MASK;
	props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
	props->max_mr_size = T4_MAX_MR_SIZE;
	props->max_qp = T4_MAX_NUM_QP;
	props->max_qp_wr = T4_MAX_QP_DEPTH;
	props->max_sge = T4_MAX_RECV_SGE;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = c4iw_max_read_depth;
	props->max_qp_init_rd_atom = c4iw_max_read_depth;
	props->max_cq = T4_MAX_NUM_CQ;
	props->max_cqe = T4_MAX_CQ_DEPTH;
	props->max_mr = c4iw_num_stags(&dev->rdev);
	props->max_pd = T4_MAX_NUM_PD;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;

	return 0;
}

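/*
 * Port attributes are derived from the underlying Ethernet netdev:
 * the active MTU is rounded down to the nearest IB enum, and the port
 * state is ACTIVE only when the link carrier is up and an IPv4 address
 * is configured, since connections are set up over TCP/IP.
 */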
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
			   struct ib_port_attr *props)
{
	struct c4iw_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_c4iw_dev(ibdev);
	netdev = dev->rdev.lldi.ports[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);

	return sprintf(buf, "%u.%u.%u.%u\n",
			FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
		       c4iw_dev->rdev.lldi.pdev->device);
}

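/*
 * Protocol statistics.  The aggregate TCP counters are pulled from the
 * LLD (cxgb4) and summed across the IPv4 and IPv6 paths, since iWARP
 * traffic rides on the adapter's TCP engine.
 */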
static int c4iw_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct tp_tcp_stats v4, v6;
	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
	memset(stats, 0, sizeof(*stats));
	stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
	stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
	stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
	stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;

	return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

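/*
 * Register the device with the RDMA core: populate the node identity
 * from port 0's MAC address, advertise the supported uverbs commands,
 * wire up the verbs and iWARP CM callbacks, then create the sysfs
 * attribute files.  Unwind in reverse order on failure.
 */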
int c4iw_register_device(struct c4iw_dev *dev)
{
	int ret;
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	BUG_ON(!dev->rdev.lldi.ports[0]);
	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
	if (fastreg_support)
		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	dev->ibdev.local_dma_lkey = 0;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
	dev->ibdev.query_device = c4iw_query_device;
	dev->ibdev.query_port = c4iw_query_port;
	dev->ibdev.query_pkey = c4iw_query_pkey;
	dev->ibdev.query_gid = c4iw_query_gid;
	dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
	dev->ibdev.mmap = c4iw_mmap;
	dev->ibdev.alloc_pd = c4iw_allocate_pd;
	dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
	dev->ibdev.create_ah = c4iw_ah_create;
	dev->ibdev.destroy_ah = c4iw_ah_destroy;
	dev->ibdev.create_qp = c4iw_create_qp;
	dev->ibdev.modify_qp = c4iw_ib_modify_qp;
	dev->ibdev.query_qp = c4iw_ib_query_qp;
	dev->ibdev.destroy_qp = c4iw_destroy_qp;
	dev->ibdev.create_cq = c4iw_create_cq;
	dev->ibdev.destroy_cq = c4iw_destroy_cq;
	dev->ibdev.resize_cq = c4iw_resize_cq;
	dev->ibdev.poll_cq = c4iw_poll_cq;
	dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
	dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
	dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
	dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
	dev->ibdev.dereg_mr = c4iw_dereg_mr;
	dev->ibdev.alloc_mw = c4iw_alloc_mw;
	dev->ibdev.bind_mw = c4iw_bind_mw;
	dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
	dev->ibdev.attach_mcast = c4iw_multicast_attach;
	dev->ibdev.detach_mcast = c4iw_multicast_detach;
	dev->ibdev.process_mad = c4iw_process_mad;
	dev->ibdev.req_notify_cq = c4iw_arm_cq;
	dev->ibdev.post_send = c4iw_post_send;
	dev->ibdev.post_recv = c4iw_post_receive;
	dev->ibdev.get_protocol_stats = c4iw_get_mib;
	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = c4iw_connect;
	dev->ibdev.iwcm->accept = c4iw_accept_cr;
	dev->ibdev.iwcm->reject = c4iw_reject_cr;
	dev->ibdev.iwcm->create_listen = c4iw_create_listen;
	dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = c4iw_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c4iw_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void c4iw_unregister_device(struct c4iw_dev *dev)
{
	int i;

	PDBG("%s c4iw_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   c4iw_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}