Linux Audio

Check our new training course

Yocto distribution development and maintenance

Need a Yocto distribution for your embedded project?
Loading...
v6.8
  1/*
  2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 * Author: Upinder Malhi <umalhi@cisco.com>
 33 * Author: Anant Deepak <anadeepa@cisco.com>
 34 * Author: Cesare Cantu' <cantuc@cisco.com>
 35 * Author: Jeff Squyres <jsquyres@cisco.com>
 36 * Author: Kiran Thirumalai <kithirum@cisco.com>
 37 * Author: Xuyang Wang <xuywang@cisco.com>
 38 * Author: Reese Faucette <rfaucett@cisco.com>
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/inetdevice.h>
 44#include <linux/init.h>
 45#include <linux/slab.h>
 46#include <linux/errno.h>
 47#include <linux/pci.h>
 48#include <linux/netdevice.h>
 49
 50#include <rdma/ib_user_verbs.h>
 51#include <rdma/ib_addr.h>
 52
 53#include "usnic_abi.h"
 54#include "usnic_common_util.h"
 55#include "usnic_ib.h"
 56#include "usnic_ib_qp_grp.h"
 57#include "usnic_log.h"
 58#include "usnic_fwd.h"
 59#include "usnic_debugfs.h"
 60#include "usnic_ib_verbs.h"
 61#include "usnic_transport.h"
 62#include "usnic_uiom.h"
 63#include "usnic_ib_sysfs.h"
 64
 65unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
 66unsigned int usnic_ib_share_vf = 1;
 67
 68static const char usnic_version[] =
 69	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
 70	DRV_VERSION " (" DRV_RELDATE ")\n";
 71
 72static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
 73static LIST_HEAD(usnic_ib_ibdev_list);
 74
 75/* Callback dump funcs */
 76static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
 77{
 78	struct usnic_ib_vf *vf = obj;
 79	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
 80}
 81/* End callback dump funcs */
 82
/*
 * Render a textual dump of @vf's underlying vnic into @buf.
 * Delegates to usnic_vnic_dump() with the VF-header and QP-group
 * row/header formatting callbacks defined in this file / usnic_ib_qp_grp.
 */
static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}
 89
 90void usnic_ib_log_vf(struct usnic_ib_vf *vf)
 91{
 92	char *buf = kzalloc(1000, GFP_KERNEL);
 93
 94	if (!buf)
 95		return;
 96
 97	usnic_ib_dump_vf(vf, buf, 1000);
 98	usnic_dbg("%s\n", buf);
 99
100	kfree(buf);
101}
102
103/* Start of netdev section */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Force every active QP group on @us_ibdev into the ERR state.
 *
 * Walks all user contexts registered on the device and, for each QP group
 * currently in INIT, RTR or RTS, requests a transition to IB_QPS_ERR so
 * userspace observes the underlying device/link failure.
 *
 * Caller must hold us_ibdev->usdev_lock (asserted below); the lock
 * protects both ctx_list and each context's qp_grp_list.
 */
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					/* Log and keep going; no recovery is possible here. */
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}
134
/*
 * React to a netdevice notifier event targeting the PF netdev backing
 * @us_ibdev.  Updates the forwarding device state (link, MAC, MTU), fails
 * active QPs where the change invalidates them, and dispatches the
 * corresponding IB async event to consumers.  Serialized by usdev_lock.
 */
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		/* PF reset: all QPs are dead; report port error. */
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		/* Only act on actual carrier transitions vs. cached state. */
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			/* Carrier state unchanged; nothing to do. */
			usnic_dbg("Ignoring %s on %s\n",
					netdev_cmd_to_name(event),
					dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		} else {
			/* MAC feeds the GID, so a real change breaks QPs. */
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			/* MTU change invalidates active QPs' framing. */
			usnic_info("MTU Change on %s old: %u new: %u\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}
220
221static int usnic_ib_netdevice_event(struct notifier_block *notifier,
222					unsigned long event, void *ptr)
223{
224	struct usnic_ib_dev *us_ibdev;
225	struct ib_device *ibdev;
226
227	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
228
229	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
230	if (!ibdev)
231		return NOTIFY_DONE;
232
233	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
234	usnic_ib_handle_usdev_event(us_ibdev, event);
235	ib_device_put(ibdev);
 
 
236	return NOTIFY_DONE;
237}
238
/* Registered in usnic_ib_init() to track PF netdev state changes. */
static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
242/* End of netdev section */
243
244/* Start of inet section */
/*
 * React to an IPv4 address add/remove on the PF netdev backing @us_ibdev.
 * The IP address is part of the usNIC GID, so either direction fails
 * active QPs indirectly via the GID change and dispatches
 * IB_EVENT_GID_CHANGE.  @ptr is the struct in_ifaddr from the notifier.
 * Serialized by usdev_lock; always returns NOTIFY_DONE.
 */
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		/* Losing the IP invalidates the GID and thus active QPs. */
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				netdev_cmd_to_name(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}
283
284static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
285					unsigned long event, void *ptr)
286{
287	struct usnic_ib_dev *us_ibdev;
288	struct in_ifaddr *ifa = ptr;
289	struct net_device *netdev = ifa->ifa_dev->dev;
290	struct ib_device *ibdev;
291
292	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
293	if (!ibdev)
294		return NOTIFY_DONE;
295
296	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
297	usnic_ib_handle_inet_event(us_ibdev, event, ptr);
298	ib_device_put(ibdev);
 
 
299	return NOTIFY_DONE;
300}
/* Registered in usnic_ib_init() to track IPv4 address changes on the PF. */
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
304/* End of inet section*/
305
306static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
307			        struct ib_port_immutable *immutable)
308{
309	struct ib_port_attr attr;
310	int err;
311
312	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;
313
314	err = ib_query_port(ibdev, port_num, &attr);
315	if (err)
316		return err;
317
 
318	immutable->gid_tbl_len = attr.gid_tbl_len;
319
320	return 0;
321}
322
/*
 * .get_dev_fw_str callback: copy the underlying netdev's firmware version
 * string (via ethtool get_drvinfo) into @str, truncated to
 * IB_FW_VERSION_NAME_MAX.
 *
 * NOTE(review): assumes netdev->ethtool_ops and ->get_drvinfo are non-NULL
 * for the enic PF netdev — not verifiable from this file; confirm against
 * the enic driver.
 */
static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}
335
/*
 * IB device operations table for usNIC.  Verb implementations live in
 * usnic_ib_verbs.c; INIT_RDMA_OBJ_SIZE entries let the RDMA core allocate
 * driver-private object wrappers.
 */
static const struct ib_device_ops usnic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_USNIC,
	.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,

	.alloc_pd = usnic_ib_alloc_pd,
	.alloc_ucontext = usnic_ib_alloc_ucontext,
	.create_cq = usnic_ib_create_cq,
	.create_qp = usnic_ib_create_qp,
	.dealloc_pd = usnic_ib_dealloc_pd,
	.dealloc_ucontext = usnic_ib_dealloc_ucontext,
	.dereg_mr = usnic_ib_dereg_mr,
	.destroy_cq = usnic_ib_destroy_cq,
	.destroy_qp = usnic_ib_destroy_qp,
	.device_group = &usnic_attr_group,
	.get_dev_fw_str = usnic_get_dev_fw_str,
	.get_link_layer = usnic_ib_port_link_layer,
	.get_port_immutable = usnic_port_immutable,
	.mmap = usnic_ib_mmap,
	.modify_qp = usnic_ib_modify_qp,
	.query_device = usnic_ib_query_device,
	.query_gid = usnic_ib_query_gid,
	.query_port = usnic_ib_query_port,
	.query_qp = usnic_ib_query_qp,
	.reg_user_mr = usnic_ib_reg_mr,
	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};
366
367/* Start of PF discovery section */
/*
 * Allocate, initialize and register an ib_device for the PF @dev.
 *
 * Returns the new usnic_ib_dev on success.  On failure the return is
 * mixed: ERR_PTR(-EFAULT) if the ib_device allocation fails, NULL for
 * later failures — the sole caller (usnic_ib_discover_pf) handles both
 * via IS_ERR_OR_NULL.
 */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;
	int ret;

	usnic_dbg("\n");
	/* The enic driver stores the PF's netdev as PCI drvdata. */
	netdev = pci_get_drvdata(dev);

	us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;

	ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);

	ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
	if (ret)
		goto err_fwd_dealloc;

	dma_set_max_seg_size(&dev->dev, SZ_2G);
	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
		goto err_fwd_dealloc;

	/* Seed forwarding-device state from the current netdev state. */
	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	/* Pick up the netdev's primary IPv4 address, if one is configured. */
	rcu_read_lock();
	ind = __in_dev_get_rcu(netdev);
	if (ind) {
		const struct in_ifaddr *ifa;

		ifa = rcu_dereference(ind->ifa_list);
		if (ifa)
			usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
	}
	rcu_read_unlock();

	/* Node GUID is derived from the permanent MAC + IP-based GID. */
	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	/* vf_cnt's release callback (usnic_ib_undiscover_pf) tears us down. */
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}
448
/*
 * Tear down a PF device created by usnic_ib_device_add(): unregister its
 * sysfs entries, free the forwarding device, then unregister and free the
 * ib_device.  Called when the last VF reference is dropped.
 */
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}
457
/*
 * kref release callback for usnic_ib_dev::vf_cnt — runs when the last VF
 * referencing a PF is removed.  Unlinks the PF from the global ibdev list
 * and destroys it; WARNs if the PF is unexpectedly not on the list.
 */
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			found = true;
			break;
		}
	}


	mutex_unlock(&usnic_ib_ibdev_list_lock);
	if (found)
		/* Removal happens outside the list lock; us_ibdev is already unlinked. */
		usnic_ib_device_remove(us_ibdev);
	else
		WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}
482
/*
 * Find or create the PF ib_device that owns the VF behind @vnic.
 *
 * If the physical function is already on usnic_ib_ibdev_list, take a VF
 * reference and return it.  Otherwise create it via usnic_ib_device_add(),
 * register its sysfs entries and add it to the list (with vf_cnt == 1
 * from kref_init).  Returns the PF or an ERR_PTR; never NULL.
 */
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		/* Normalize device_add's NULL failures to an ERR_PTR. */
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
520/* End of PF discovery section */
521
522/* Start of PCI section */
523
/* PCI IDs this driver binds to: the Cisco VIC userspace-NIC VF function. */
static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};
528
/*
 * PCI probe for a usNIC VF: enable the device, allocate its vnic, attach
 * it to (or create) the owning PF ib_device, and record the PF's per-type
 * resource maxima.  Errors unwind via the goto chain in strict reverse
 * order of acquisition.  Returns 0 or a negative errno.
 */
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	/* usNIC exposes device memory to userspace, so an IOMMU is mandatory. */
	if (!device_iommu_mapped(&pdev->dev)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	mutex_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
		   dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}
612
/*
 * PCI remove for a usNIC VF: detach it from its PF, drop the PF reference
 * (the last drop triggers usnic_ib_undiscover_pf), and release all PCI
 * resources acquired in probe, in reverse order.
 */
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}
631
632/* PCI driver entry points */
/* PCI driver binding probe/remove to the usNIC VF device IDs above. */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
639/* End of PCI section */
640
641/* Start of module section */
/*
 * Module init: register the PCI driver, the netdev and inet-addr
 * notifiers, and the transport layer; finally create the debugfs tree.
 * Each failure unwinds everything registered before it.
 */
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		/*
		 * NOTE(review): "out_umem_fini" is a vestigial label name —
		 * there is no umem init left to undo; it just returns err.
		 */
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:

	return err;
}
686
/* Module exit: undo usnic_ib_init() in reverse registration order. */
static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
}
696
/* Module metadata and runtime-tunable parameters (writable via sysfs). */
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");

module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
708/* End of module section */
v4.6
  1/*
  2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 * Author: Upinder Malhi <umalhi@cisco.com>
 33 * Author: Anant Deepak <anadeepa@cisco.com>
 34 * Author: Cesare Cantu' <cantuc@cisco.com>
 35 * Author: Jeff Squyres <jsquyres@cisco.com>
 36 * Author: Kiran Thirumalai <kithirum@cisco.com>
 37 * Author: Xuyang Wang <xuywang@cisco.com>
 38 * Author: Reese Faucette <rfaucett@cisco.com>
 39 *
 40 */
 41
 42#include <linux/module.h>
 43#include <linux/inetdevice.h>
 44#include <linux/init.h>
 45#include <linux/slab.h>
 46#include <linux/errno.h>
 47#include <linux/pci.h>
 48#include <linux/netdevice.h>
 49
 50#include <rdma/ib_user_verbs.h>
 51#include <rdma/ib_addr.h>
 52
 53#include "usnic_abi.h"
 54#include "usnic_common_util.h"
 55#include "usnic_ib.h"
 56#include "usnic_ib_qp_grp.h"
 57#include "usnic_log.h"
 58#include "usnic_fwd.h"
 59#include "usnic_debugfs.h"
 60#include "usnic_ib_verbs.h"
 61#include "usnic_transport.h"
 62#include "usnic_uiom.h"
 63#include "usnic_ib_sysfs.h"
 64
 65unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
 66unsigned int usnic_ib_share_vf = 1;
 67
 68static const char usnic_version[] =
 69	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
 70	DRV_VERSION " (" DRV_RELDATE ")\n";
 71
 72static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
 73static LIST_HEAD(usnic_ib_ibdev_list);
 74
 75/* Callback dump funcs */
/*
 * Dump-callback header: write a "PF: <name> " prefix identifying the PF
 * that owns this VF.  @obj is a struct usnic_ib_vf *; returns bytes
 * written (scnprintf semantics).
 */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
}
 81/* End callback dump funcs */
 82
/*
 * Render a textual dump of @vf's underlying vnic into @buf using the
 * VF-header and QP-group formatting callbacks.
 */
static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}
 89
 90void usnic_ib_log_vf(struct usnic_ib_vf *vf)
 91{
 92	char buf[1000];
 93	usnic_ib_dump_vf(vf, buf, sizeof(buf));
 
 
 
 
 94	usnic_dbg("%s\n", buf);
 
 
 95}
 96
 97/* Start of netdev section */
 98static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
 99{
100	const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
101		"NETDEV_REBOOT", "NETDEV_CHANGE",
102		"NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
103		"NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
104		"NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
105		"NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
106		"NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
107		"NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
108	};
109
110	if (event >= ARRAY_SIZE(event2str))
111		return "UNKNOWN_NETDEV_EVENT";
112	else
113		return event2str[event];
114}
115
116static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
117{
118	struct usnic_ib_ucontext *ctx;
119	struct usnic_ib_qp_grp *qp_grp;
120	enum ib_qp_state cur_state;
121	int status;
122
123	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));
124
125	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
126		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
127			cur_state = qp_grp->state;
128			if (cur_state == IB_QPS_INIT ||
129				cur_state == IB_QPS_RTR ||
130				cur_state == IB_QPS_RTS) {
131				status = usnic_ib_qp_grp_modify(qp_grp,
132								IB_QPS_ERR,
133								NULL);
134				if (status) {
135					usnic_err("Failed to transistion qp grp %u from %s to %s\n",
136						qp_grp->grp_id,
137						usnic_ib_qp_grp_state_to_string
138						(cur_state),
139						usnic_ib_qp_grp_state_to_string
140						(IB_QPS_ERR));
141				}
142			}
143		}
144	}
145}
146
/*
 * React to a netdevice notifier event targeting the PF netdev backing
 * @us_ibdev: update forwarding-device state (link, MAC, MTU), fail active
 * QPs where the change invalidates them, and dispatch the corresponding
 * IB async event.  Serialized by usdev_lock.
 */
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		/* PF reset: all QPs are dead; report port error. */
		usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		/* Only act on actual carrier transitions vs. cached state. */
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			/* Carrier state unchanged; nothing to do. */
			usnic_dbg("Ignoring %s on %s\n",
					usnic_ib_netdev_event_to_string(event),
					us_ibdev->ib_dev.name);
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					us_ibdev->ib_dev.name);
		} else {
			/* MAC feeds the GID, so a real change breaks QPs. */
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			/* MTU change invalidates active QPs' framing. */
			usnic_info("MTU Change on %s old: %u new: %u\n",
					us_ibdev->ib_dev.name,
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					us_ibdev->ib_dev.name);
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}
230
/*
 * Netdevice notifier entry point (v4.6 style): linearly scan the global
 * PF list for the device backed by @netdev and forward the event.  The
 * list lock is held across the handler call.  Always returns NOTIFY_DONE.
 */
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->netdev == netdev) {
			usnic_ib_handle_usdev_event(us_ibdev, event);
			break;
		}
	}
	mutex_unlock(&usnic_ib_ibdev_list_lock);

	return NOTIFY_DONE;
}
249
/* Registered at module init to track PF netdev state changes. */
static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
253/* End of netdev section */
254
255/* Start of inet section */
/*
 * React to an IPv4 address add/remove on the PF netdev backing @us_ibdev.
 * The IP address is part of the usNIC GID, so both directions dispatch
 * IB_EVENT_GID_CHANGE; removal additionally fails active QPs.
 * @ptr is the struct in_ifaddr from the notifier.  Serialized by
 * usdev_lock; always returns NOTIFY_DONE.
 */
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				usnic_ib_netdev_event_to_string(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		/* Losing the IP invalidates the GID and thus active QPs. */
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				usnic_ib_netdev_event_to_string(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				usnic_ib_netdev_event_to_string(event),
				us_ibdev->ib_dev.name);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}
294
295static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
296					unsigned long event, void *ptr)
297{
298	struct usnic_ib_dev *us_ibdev;
299	struct in_ifaddr *ifa = ptr;
300	struct net_device *netdev = ifa->ifa_dev->dev;
 
301
302	mutex_lock(&usnic_ib_ibdev_list_lock);
303	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
304		if (us_ibdev->netdev == netdev) {
305			usnic_ib_handle_inet_event(us_ibdev, event, ptr);
306			break;
307		}
308	}
309	mutex_unlock(&usnic_ib_ibdev_list_lock);
310
311	return NOTIFY_DONE;
312}
/* Delivers IPv4 address change notifications to usnic_ib_inetaddr_event() */
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
316/* End of inet section*/
317
318static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
319			        struct ib_port_immutable *immutable)
320{
321	struct ib_port_attr attr;
322	int err;
323
324	err = usnic_ib_query_port(ibdev, port_num, &attr);
 
 
325	if (err)
326		return err;
327
328	immutable->pkey_tbl_len = attr.pkey_tbl_len;
329	immutable->gid_tbl_len = attr.gid_tbl_len;
330
331	return 0;
332}
333
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334/* Start of PF discovery section */
335static void *usnic_ib_device_add(struct pci_dev *dev)
336{
337	struct usnic_ib_dev *us_ibdev;
338	union ib_gid gid;
339	struct in_ifaddr *in;
340	struct net_device *netdev;
 
341
342	usnic_dbg("\n");
343	netdev = pci_get_drvdata(dev);
344
345	us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev));
346	if (!us_ibdev) {
347		usnic_err("Device %s context alloc failed\n",
348				netdev_name(pci_get_drvdata(dev)));
349		return ERR_PTR(-EFAULT);
350	}
351
352	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
353	if (!us_ibdev->ufdev) {
354		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
355		goto err_dealloc;
356	}
357
358	mutex_init(&us_ibdev->usdev_lock);
359	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
360	INIT_LIST_HEAD(&us_ibdev->ctx_list);
361
362	us_ibdev->pdev = dev;
363	us_ibdev->netdev = pci_get_drvdata(dev);
364	us_ibdev->ib_dev.owner = THIS_MODULE;
365	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
366	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
367	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
368	us_ibdev->ib_dev.dma_device = &dev->dev;
369	us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
370	strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
371
372	us_ibdev->ib_dev.uverbs_cmd_mask =
373		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
374		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
375		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
376		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
377		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
378		(1ull << IB_USER_VERBS_CMD_REG_MR) |
379		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
380		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
381		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
382		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
383		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
384		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
385		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
386		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
387		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
388		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
389		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
390
391	us_ibdev->ib_dev.query_device = usnic_ib_query_device;
392	us_ibdev->ib_dev.query_port = usnic_ib_query_port;
393	us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
394	us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
395	us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
396	us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
397	us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
398	us_ibdev->ib_dev.create_qp = usnic_ib_create_qp;
399	us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp;
400	us_ibdev->ib_dev.query_qp = usnic_ib_query_qp;
401	us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp;
402	us_ibdev->ib_dev.create_cq = usnic_ib_create_cq;
403	us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq;
404	us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr;
405	us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr;
406	us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext;
407	us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext;
408	us_ibdev->ib_dev.mmap = usnic_ib_mmap;
409	us_ibdev->ib_dev.create_ah = usnic_ib_create_ah;
410	us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah;
411	us_ibdev->ib_dev.post_send = usnic_ib_post_send;
412	us_ibdev->ib_dev.post_recv = usnic_ib_post_recv;
413	us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq;
414	us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
415	us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
416	us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
417
 
418
419	if (ib_register_device(&us_ibdev->ib_dev, NULL))
 
 
 
 
 
420		goto err_fwd_dealloc;
421
422	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
423	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
424	if (netif_carrier_ok(us_ibdev->netdev))
425		usnic_fwd_carrier_up(us_ibdev->ufdev);
426
427	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
428	if (in != NULL)
429		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
 
 
 
 
 
 
 
430
431	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
432				us_ibdev->ufdev->inaddr, &gid.raw[0]);
433	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
434		sizeof(gid.global.interface_id));
435	kref_init(&us_ibdev->vf_cnt);
436
437	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
438			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
439			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
440			us_ibdev->ufdev->mtu);
441	return us_ibdev;
442
443err_fwd_dealloc:
444	usnic_fwd_dev_free(us_ibdev->ufdev);
445err_dealloc:
446	usnic_err("failed -- deallocing device\n");
447	ib_dealloc_device(&us_ibdev->ib_dev);
448	return NULL;
449}
450
/*
 * Tear down a PF ib_device: sysfs entries first, then the forwarding
 * device, then the IB core registration, and finally the device memory.
 * The call order matters; callers hold usnic_ib_ibdev_list_lock (see
 * usnic_ib_undiscover_pf()).
 */
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}
459
460static void usnic_ib_undiscover_pf(struct kref *kref)
461{
462	struct usnic_ib_dev *us_ibdev, *tmp;
463	struct pci_dev *dev;
464	bool found = false;
465
466	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
467	mutex_lock(&usnic_ib_ibdev_list_lock);
468	list_for_each_entry_safe(us_ibdev, tmp,
469				&usnic_ib_ibdev_list, ib_dev_link) {
470		if (us_ibdev->pdev == dev) {
471			list_del(&us_ibdev->ib_dev_link);
472			usnic_ib_device_remove(us_ibdev);
473			found = true;
474			break;
475		}
476	}
477
478	WARN(!found, "Failed to remove PF %s\n", pci_name(dev));
479
480	mutex_unlock(&usnic_ib_ibdev_list_lock);
 
 
 
 
481}
482
483static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
484{
485	struct usnic_ib_dev *us_ibdev;
486	struct pci_dev *parent_pci, *vf_pci;
487	int err;
488
489	vf_pci = usnic_vnic_get_pdev(vnic);
490	parent_pci = pci_physfn(vf_pci);
491
492	BUG_ON(!parent_pci);
493
494	mutex_lock(&usnic_ib_ibdev_list_lock);
495	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
496		if (us_ibdev->pdev == parent_pci) {
497			kref_get(&us_ibdev->vf_cnt);
498			goto out;
499		}
500	}
501
502	us_ibdev = usnic_ib_device_add(parent_pci);
503	if (IS_ERR_OR_NULL(us_ibdev)) {
504		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
505		goto out;
506	}
507
508	err = usnic_ib_sysfs_register_usdev(us_ibdev);
509	if (err) {
510		usnic_ib_device_remove(us_ibdev);
511		us_ibdev = ERR_PTR(err);
512		goto out;
513	}
514
515	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
516out:
517	mutex_unlock(&usnic_ib_ibdev_list_lock);
518	return us_ibdev;
519}
520/* End of PF discovery section */
521
522/* Start of PCI section */
523
/* PCI IDs this driver binds to: Cisco VIC userspace NIC functions */
static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}	/* list terminator */
};
528
/*
 * PCI probe for a usNIC VF: enable the device, claim its BARs, allocate
 * the vnic, attach the VF to its (possibly newly discovered) PF, and
 * record the per-resource-type maximums on the PF.  All failure paths
 * unwind in reverse acquisition order via the goto chain below.
 * Returns 0 on success or a negative errno.
 */
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		/* usnic_vnic_alloc() may return NULL or an ERR_PTR */
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save the per-resource-type maximums on the PF.  They are the same
	 * for every VF, so unconditionally rewriting them is simpler than a
	 * "set once" guard.
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			pf->ib_dev.name);
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}
608
/*
 * PCI remove for a usNIC VF: detach the VF from its PF's list, drop the
 * PF VF-count reference (the last drop tears the PF down via
 * usnic_ib_undiscover_pf()), then release the vnic and PCI resources in
 * reverse probe order.
 */
static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}
628
/* PCI driver entry points: binds usnic_ib_pci_ids to probe/remove above */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
636/* End of PCI section */
637
638/* Start of module section */
639static int __init usnic_ib_init(void)
640{
641	int err;
642
643	printk_once(KERN_INFO "%s", usnic_version);
644
645	err = usnic_uiom_init(DRV_NAME);
646	if (err) {
647		usnic_err("Unable to initalize umem with err %d\n", err);
648		return err;
649	}
650
651	if (pci_register_driver(&usnic_ib_pci_driver)) {
652		usnic_err("Unable to register with PCI\n");
653		goto out_umem_fini;
654	}
655
656	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
657	if (err) {
658		usnic_err("Failed to register netdev notifier\n");
659		goto out_pci_unreg;
660	}
661
662	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
663	if (err) {
664		usnic_err("Failed to register inet addr notifier\n");
665		goto out_unreg_netdev_notifier;
666	}
667
668	err = usnic_transport_init();
669	if (err) {
670		usnic_err("Failed to initialize transport\n");
671		goto out_unreg_inetaddr_notifier;
672	}
673
674	usnic_debugfs_init();
675
676	return 0;
677
678out_unreg_inetaddr_notifier:
679	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
680out_unreg_netdev_notifier:
681	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
682out_pci_unreg:
683	pci_unregister_driver(&usnic_ib_pci_driver);
684out_umem_fini:
685	usnic_uiom_fini();
686
687	return err;
688}
689
/*
 * Module exit: tear everything down in the reverse order of
 * usnic_ib_init().
 */
static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
	usnic_uiom_fini();
}
700
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
/* Runtime-tunable parameters; semantics described by MODULE_PARM_DESC below */
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
713/* End of module section */