   1/*
   2 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include "ipoib.h"
  36
  37#include <linux/module.h>
  38
  39#include <linux/init.h>
  40#include <linux/slab.h>
  41#include <linux/kernel.h>
  42#include <linux/vmalloc.h>
  43
  44#include <linux/if_arp.h>	/* For ARPHRD_xxx */
  45
  46#include <linux/ip.h>
  47#include <linux/in.h>
  48
  49#include <linux/jhash.h>
  50#include <net/arp.h>
  51#include <net/addrconf.h>
  52#include <linux/inetdevice.h>
  53#include <rdma/ib_cache.h>
  54
  55MODULE_AUTHOR("Roland Dreier");
  56MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
  57MODULE_LICENSE("Dual BSD/GPL");
  58
  59int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
  60int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
  61
  62module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
  63MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
  64module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
  65MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
  66
  67#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
  68int ipoib_debug_level;
  69
  70module_param_named(debug_level, ipoib_debug_level, int, 0644);
  71MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
  72#endif
  73
  74struct ipoib_path_iter {
  75	struct net_device *dev;
  76	struct ipoib_path  path;
  77};
  78
  79static const u8 ipv4_bcast_addr[] = {
  80	0x00, 0xff, 0xff, 0xff,
  81	0xff, 0x12, 0x40, 0x1b,	0x00, 0x00, 0x00, 0x00,
  82	0x00, 0x00, 0x00, 0x00,	0xff, 0xff, 0xff, 0xff
  83};
  84
  85struct workqueue_struct *ipoib_workqueue;
  86
  87struct ib_sa_client ipoib_sa_client;
  88
  89static int ipoib_add_one(struct ib_device *device);
  90static void ipoib_remove_one(struct ib_device *device, void *client_data);
  91static void ipoib_neigh_reclaim(struct rcu_head *rp);
  92static struct net_device *ipoib_get_net_dev_by_params(
  93		struct ib_device *dev, u32 port, u16 pkey,
  94		const union ib_gid *gid, const struct sockaddr *addr,
  95		void *client_data);
  96static int ipoib_set_mac(struct net_device *dev, void *addr);
  97static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
  98		       int cmd);
  99
 100static struct ib_client ipoib_client = {
 101	.name   = "ipoib",
 102	.add    = ipoib_add_one,
 103	.remove = ipoib_remove_one,
 104	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
 105};
 106
 107#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 108static int ipoib_netdev_event(struct notifier_block *this,
 109			      unsigned long event, void *ptr)
 110{
 111	struct netdev_notifier_info *ni = ptr;
 112	struct net_device *dev = ni->dev;
 113
 114	if (dev->netdev_ops->ndo_open != ipoib_open)
 115		return NOTIFY_DONE;
 116
 117	switch (event) {
 118	case NETDEV_REGISTER:
 119		ipoib_create_debug_files(dev);
 120		break;
 121	case NETDEV_CHANGENAME:
 122		ipoib_delete_debug_files(dev);
 123		ipoib_create_debug_files(dev);
 124		break;
 125	case NETDEV_UNREGISTER:
 126		ipoib_delete_debug_files(dev);
 127		break;
 128	}
 129
 130	return NOTIFY_DONE;
 131}
 132#endif
 133
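    /*
     * Bring the interface up: open the IB device resources, move the QP to an
     * operational state, propagate IFF_UP to any child (VLAN) interfaces, and
     * finally start the transmit queue.
     */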
 134int ipoib_open(struct net_device *dev)
 135{
 136	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 137
 138	ipoib_dbg(priv, "bringing up interface\n");
 139
 140	netif_carrier_off(dev);
 141
 142	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 143
 144	if (ipoib_ib_dev_open(dev)) {
 145		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 146			return 0;
 147		goto err_disable;
 148	}
 149
 150	ipoib_ib_dev_up(dev);
 151
 152	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 153		struct ipoib_dev_priv *cpriv;
 154
 155		/* Bring up any child interfaces too */
 156		down_read(&priv->vlan_rwsem);
 157		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 158			int flags;
 159
 160			flags = cpriv->dev->flags;
 161			if (flags & IFF_UP)
 162				continue;
 163
 164			dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
 165		}
 166		up_read(&priv->vlan_rwsem);
 167	} else if (priv->parent) {
 168		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
 169
 170		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
 171			ipoib_dbg(priv, "parent device %s is not up, so child device may be not functioning.\n",
 172				  ppriv->dev->name);
 173	}
 174	netif_start_queue(dev);
 175
 176	return 0;
 177
 178err_disable:
 179	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 180
 181	return -EINVAL;
 182}
 183
 184static int ipoib_stop(struct net_device *dev)
 185{
 186	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 187
 188	ipoib_dbg(priv, "stopping interface\n");
 189
 190	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 191
 192	netif_stop_queue(dev);
 193
 194	ipoib_ib_dev_down(dev);
 195	ipoib_ib_dev_stop(dev);
 196
 197	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 198		struct ipoib_dev_priv *cpriv;
 199
 200		/* Bring down any child interfaces too */
 201		down_read(&priv->vlan_rwsem);
 202		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 203			int flags;
 204
 205			flags = cpriv->dev->flags;
 206			if (!(flags & IFF_UP))
 207				continue;
 208
 209			dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
 210		}
 211		up_read(&priv->vlan_rwsem);
 212	}
 213
 214	return 0;
 215}
 216
 217static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 218{
 219	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 220
 221	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
 222		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 223
 224	return features;
 225}
 226
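    /*
     * MTU handling differs between modes: in connected mode the MTU may
     * exceed the IB link MTU (up to ipoib_cm_max_mtu()), while in datagram
     * mode it is limited by IPOIB_UD_MTU() and clamped to the multicast MTU
     * before the lower rdma_netdev is notified of the effective value.
     */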
 227static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 228{
 229	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 230	int ret = 0;
 231
 232	/* dev->mtu > 2K ==> connected mode */
 233	if (ipoib_cm_admin_enabled(dev)) {
 234		if (new_mtu > ipoib_cm_max_mtu(dev))
 235			return -EINVAL;
 236
 237		if (new_mtu > priv->mcast_mtu)
 238			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
 239				   priv->mcast_mtu);
 240
 241		dev->mtu = new_mtu;
 242		return 0;
 243	}
 244
 245	if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
 246	    new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
 247		return -EINVAL;
 248
 249	priv->admin_mtu = new_mtu;
 250
 251	if (priv->mcast_mtu < priv->admin_mtu)
 252		ipoib_dbg(priv, "MTU must be smaller than the underlying "
 253				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
 254
 255	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
 256
 257	if (priv->rn_ops->ndo_change_mtu) {
 258		bool carrier_status = netif_carrier_ok(dev);
 259
 260		netif_carrier_off(dev);
 261
 262		/* notify lower level on the real mtu */
 263		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
 264
 265		if (carrier_status)
 266			netif_carrier_on(dev);
 267	} else {
 268		dev->mtu = new_mtu;
 269	}
 270
 271	return ret;
 272}
 273
 274static void ipoib_get_stats(struct net_device *dev,
 275			    struct rtnl_link_stats64 *stats)
 276{
 277	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 278
 279	if (priv->rn_ops->ndo_get_stats64)
 280		priv->rn_ops->ndo_get_stats64(dev, stats);
 281	else
 282		netdev_stats_to_stats64(stats, &dev->stats);
 283}
 284
 285/* Called with an RCU read lock taken */
 286static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
 287					struct net_device *dev)
 288{
 289	struct net *net = dev_net(dev);
 290	struct in_device *in_dev;
 291	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
 292	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
 293	__be32 ret_addr;
 294
 295	switch (addr->sa_family) {
 296	case AF_INET:
 297		in_dev = in_dev_get(dev);
 298		if (!in_dev)
 299			return false;
 300
 301		ret_addr = inet_confirm_addr(net, in_dev, 0,
 302					     addr_in->sin_addr.s_addr,
 303					     RT_SCOPE_HOST);
 304		in_dev_put(in_dev);
 305		if (ret_addr)
 306			return true;
 307
 308		break;
 309	case AF_INET6:
 310		if (IS_ENABLED(CONFIG_IPV6) &&
 311		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
 312			return true;
 313
 314		break;
 315	}
 316	return false;
 317}
 318
 319/*
 320 * Find the master net_device on top of the given net_device.
 321 * @dev: base IPoIB net_device
 322 *
 323 * Returns the master net_device with a reference held, or the same net_device
 324 * if no master exists.
 325 */
 326static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
 327{
 328	struct net_device *master;
 329
 330	rcu_read_lock();
 331	master = netdev_master_upper_dev_get_rcu(dev);
 332	if (master)
 333		dev_hold(master);
 334	rcu_read_unlock();
 335
 336	if (master)
 337		return master;
 338
 339	dev_hold(dev);
 340	return dev;
 341}
 342
 343struct ipoib_walk_data {
 344	const struct sockaddr *addr;
 345	struct net_device *result;
 346};
 347
 348static int ipoib_upper_walk(struct net_device *upper,
 349			    struct netdev_nested_priv *priv)
 350{
 351	struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
 352	int ret = 0;
 353
 354	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
 355		dev_hold(upper);
 356		data->result = upper;
 357		ret = 1;
 358	}
 359
 360	return ret;
 361}
 362
 363/**
 364 * ipoib_get_net_dev_match_addr - Find a net_device matching
 365 * the given address, which is an upper device of the given net_device.
 366 *
 367 * @addr: IP address to look for.
 368 * @dev: base IPoIB net_device
 369 *
 370 * If found, returns the net_device with a reference held. Otherwise returns
 371 * NULL.
 372 */
 373static struct net_device *ipoib_get_net_dev_match_addr(
 374		const struct sockaddr *addr, struct net_device *dev)
 375{
 376	struct netdev_nested_priv priv;
 377	struct ipoib_walk_data data = {
 378		.addr = addr,
 379	};
 380
 381	priv.data = (void *)&data;
 382	rcu_read_lock();
 383	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
 384		dev_hold(dev);
 385		data.result = dev;
 386		goto out;
 387	}
 388
 389	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
 390out:
 391	rcu_read_unlock();
 392	return data.result;
 393}
 394
 395/* Returns the number of IPoIB netdevs on top of a given ipoib device matching a
 396 * pkey_index and address, if one exists.
 397 *
 398 * @found_net_dev: contains a matching net_device if the return value >= 1,
 399 * with a reference held. */
 400static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
 401				     const union ib_gid *gid,
 402				     u16 pkey_index,
 403				     const struct sockaddr *addr,
 404				     int nesting,
 405				     struct net_device **found_net_dev)
 406{
 407	struct ipoib_dev_priv *child_priv;
 408	struct net_device *net_dev = NULL;
 409	int matches = 0;
 410
 411	if (priv->pkey_index == pkey_index &&
 412	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
 413		if (!addr) {
 414			net_dev = ipoib_get_master_net_dev(priv->dev);
 415		} else {
 416			/* Verify the net_device matches the IP address, as
 417			 * IPoIB child devices currently share a GID. */
 418			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
 419		}
 420		if (net_dev) {
 421			if (!*found_net_dev)
 422				*found_net_dev = net_dev;
 423			else
 424				dev_put(net_dev);
 425			++matches;
 426		}
 427	}
 428
 429	/* Check child interfaces */
 430	down_read_nested(&priv->vlan_rwsem, nesting);
 431	list_for_each_entry(child_priv, &priv->child_intfs, list) {
 432		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
 433						    pkey_index, addr,
 434						    nesting + 1,
 435						    found_net_dev);
 436		if (matches > 1)
 437			break;
 438	}
 439	up_read(&priv->vlan_rwsem);
 440
 441	return matches;
 442}
 443
 444/* Returns the number of matching net_devs found (between 0 and 2). Also
 445 * returns the matching net_device in the @net_dev parameter, holding a
 446 * reference to the net_device, if the number of matches >= 1 */
 447static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
 448					 u16 pkey_index,
 449					 const union ib_gid *gid,
 450					 const struct sockaddr *addr,
 451					 struct net_device **net_dev)
 452{
 453	struct ipoib_dev_priv *priv;
 454	int matches = 0;
 455
 456	*net_dev = NULL;
 457
 458	list_for_each_entry(priv, dev_list, list) {
 459		if (priv->port != port)
 460			continue;
 461
 462		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
 463						     addr, 0, net_dev);
 464		if (matches > 1)
 465			break;
 466	}
 467
 468	return matches;
 469}
 470
 471static struct net_device *ipoib_get_net_dev_by_params(
 472		struct ib_device *dev, u32 port, u16 pkey,
 473		const union ib_gid *gid, const struct sockaddr *addr,
 474		void *client_data)
 475{
 476	struct net_device *net_dev;
 477	struct list_head *dev_list = client_data;
 478	u16 pkey_index;
 479	int matches;
 480	int ret;
 481
 482	if (!rdma_protocol_ib(dev, port))
 483		return NULL;
 484
 485	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
 486	if (ret)
 487		return NULL;
 488
 489	/* See if we can find a unique device matching the L2 parameters */
 490	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
 491						gid, NULL, &net_dev);
 492
 493	switch (matches) {
 494	case 0:
 495		return NULL;
 496	case 1:
 497		return net_dev;
 498	}
 499
 500	dev_put(net_dev);
 501
 502	/* Couldn't find a unique device with L2 parameters only. Use L3
 503	 * address to uniquely match the net device */
 504	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
 505						gid, addr, &net_dev);
 506	switch (matches) {
 507	case 0:
 508		return NULL;
 509	default:
 510		dev_warn_ratelimited(&dev->dev,
 511				     "duplicate IP address detected\n");
 512		fallthrough;
 513	case 1:
 514		return net_dev;
 515	}
 516}
 517
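    /*
     * Switch between connected and datagram mode.  The caller holds the RTNL
     * lock; it is dropped around ipoib_flush_paths() so pending path work can
     * complete, and -EBUSY is returned if it cannot be re-taken afterwards.
     */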
 518int ipoib_set_mode(struct net_device *dev, const char *buf)
 519{
 520	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 521
 522	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
 523	     !strcmp(buf, "connected\n")) ||
 524	     (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
 525	     !strcmp(buf, "datagram\n"))) {
 526		return 0;
 527	}
 528
 529	/* flush paths if we switch modes so that connections are restarted */
 530	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
 531		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 532		ipoib_warn(priv, "enabling connected mode "
 533			   "will cause multicast packet drops\n");
 534		netdev_update_features(dev);
 535		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
 536		netif_set_real_num_tx_queues(dev, 1);
 537		rtnl_unlock();
 538		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
 539
 540		ipoib_flush_paths(dev);
 541		return (!rtnl_trylock()) ? -EBUSY : 0;
 542	}
 543
 544	if (!strcmp(buf, "datagram\n")) {
 545		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 546		netdev_update_features(dev);
 547		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
 548		netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
 549		rtnl_unlock();
 550		ipoib_flush_paths(dev);
 551		return (!rtnl_trylock()) ? -EBUSY : 0;
 552	}
 553
 554	return -EINVAL;
 555}
 556
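    /*
     * Path records are kept in an rb-tree (priv->path_tree) keyed by the
     * destination GID; lookups and insertions are done under priv->lock.
     */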
 557struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 558{
 559	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 560	struct rb_node *n = priv->path_tree.rb_node;
 561	struct ipoib_path *path;
 562	int ret;
 563
 564	while (n) {
 565		path = rb_entry(n, struct ipoib_path, rb_node);
 566
 567		ret = memcmp(gid, path->pathrec.dgid.raw,
 568			     sizeof (union ib_gid));
 569
 570		if (ret < 0)
 571			n = n->rb_left;
 572		else if (ret > 0)
 573			n = n->rb_right;
 574		else
 575			return path;
 576	}
 577
 578	return NULL;
 579}
 580
 581static int __path_add(struct net_device *dev, struct ipoib_path *path)
 582{
 583	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 584	struct rb_node **n = &priv->path_tree.rb_node;
 585	struct rb_node *pn = NULL;
 586	struct ipoib_path *tpath;
 587	int ret;
 588
 589	while (*n) {
 590		pn = *n;
 591		tpath = rb_entry(pn, struct ipoib_path, rb_node);
 592
 593		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
 594			     sizeof (union ib_gid));
 595		if (ret < 0)
 596			n = &pn->rb_left;
 597		else if (ret > 0)
 598			n = &pn->rb_right;
 599		else
 600			return -EEXIST;
 601	}
 602
 603	rb_link_node(&path->rb_node, pn, n);
 604	rb_insert_color(&path->rb_node, &priv->path_tree);
 605
 606	list_add_tail(&path->list, &priv->path_list);
 607
 608	return 0;
 609}
 610
 611static void path_free(struct net_device *dev, struct ipoib_path *path)
 612{
 613	struct sk_buff *skb;
 614
 615	while ((skb = __skb_dequeue(&path->queue)))
 616		dev_kfree_skb_irq(skb);
 617
 618	ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
 619
 620	/* remove all neigh connected to this path */
 621	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
 622
 623	if (path->ah)
 624		ipoib_put_ah(path->ah);
 625
 626	kfree(path);
 627}
 628
 629#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 630
 631struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
 632{
 633	struct ipoib_path_iter *iter;
 634
 635	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 636	if (!iter)
 637		return NULL;
 638
 639	iter->dev = dev;
 640	memset(iter->path.pathrec.dgid.raw, 0, 16);
 641
 642	if (ipoib_path_iter_next(iter)) {
 643		kfree(iter);
 644		return NULL;
 645	}
 646
 647	return iter;
 648}
 649
 650int ipoib_path_iter_next(struct ipoib_path_iter *iter)
 651{
 652	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
 653	struct rb_node *n;
 654	struct ipoib_path *path;
 655	int ret = 1;
 656
 657	spin_lock_irq(&priv->lock);
 658
 659	n = rb_first(&priv->path_tree);
 660
 661	while (n) {
 662		path = rb_entry(n, struct ipoib_path, rb_node);
 663
 664		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
 665			   sizeof (union ib_gid)) < 0) {
 666			iter->path = *path;
 667			ret = 0;
 668			break;
 669		}
 670
 671		n = rb_next(n);
 672	}
 673
 674	spin_unlock_irq(&priv->lock);
 675
 676	return ret;
 677}
 678
 679void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 680			  struct ipoib_path *path)
 681{
 682	*path = iter->path;
 683}
 684
 685#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
 686
 687void ipoib_mark_paths_invalid(struct net_device *dev)
 688{
 689	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 690	struct ipoib_path *path, *tp;
 691
 692	spin_lock_irq(&priv->lock);
 693
 694	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
 695		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
 696			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
 697			  path->pathrec.dgid.raw);
 698		if (path->ah)
 699			path->ah->valid = 0;
 700	}
 701
 702	spin_unlock_irq(&priv->lock);
 703}
 704
 705static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
 706{
 707	struct ipoib_pseudo_header *phdr;
 708
 709	phdr = skb_push(skb, sizeof(*phdr));
 710	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
 711}
 712
 713void ipoib_flush_paths(struct net_device *dev)
 714{
 715	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 716	struct ipoib_path *path, *tp;
 717	LIST_HEAD(remove_list);
 718	unsigned long flags;
 719
 720	netif_tx_lock_bh(dev);
 721	spin_lock_irqsave(&priv->lock, flags);
 722
 723	list_splice_init(&priv->path_list, &remove_list);
 724
 725	list_for_each_entry(path, &remove_list, list)
 726		rb_erase(&path->rb_node, &priv->path_tree);
 727
 728	list_for_each_entry_safe(path, tp, &remove_list, list) {
 729		if (path->query)
 730			ib_sa_cancel_query(path->query_id, path->query);
 731		spin_unlock_irqrestore(&priv->lock, flags);
 732		netif_tx_unlock_bh(dev);
 733		wait_for_completion(&path->done);
 734		path_free(dev, path);
 735		netif_tx_lock_bh(dev);
 736		spin_lock_irqsave(&priv->lock, flags);
 737	}
 738
 739	spin_unlock_irqrestore(&priv->lock, flags);
 740	netif_tx_unlock_bh(dev);
 741}
 742
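    /*
     * Completion handler for an SA path record query: build an address handle
     * from the returned record, attach it to the path and to any neighbours
     * waiting on it, then retransmit the skbs queued while the lookup was in
     * progress.
     */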
 743static void path_rec_completion(int status,
 744				struct sa_path_rec *pathrec,
 745				unsigned int num_prs, void *path_ptr)
 746{
 747	struct ipoib_path *path = path_ptr;
 748	struct net_device *dev = path->dev;
 749	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 750	struct ipoib_ah *ah = NULL;
 751	struct ipoib_ah *old_ah = NULL;
 752	struct ipoib_neigh *neigh, *tn;
 753	struct sk_buff_head skqueue;
 754	struct sk_buff *skb;
 755	unsigned long flags;
 756
 757	if (!status)
 758		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
 759			  be32_to_cpu(sa_path_get_dlid(pathrec)),
 760			  pathrec->dgid.raw);
 761	else
 762		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
 763			  status, path->pathrec.dgid.raw);
 764
 765	skb_queue_head_init(&skqueue);
 766
 767	if (!status) {
 768		struct rdma_ah_attr av;
 769
 770		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
 771					       pathrec, &av, NULL)) {
 772			ah = ipoib_create_ah(dev, priv->pd, &av);
 773			rdma_destroy_ah_attr(&av);
 774		}
 775	}
 776
 777	spin_lock_irqsave(&priv->lock, flags);
 778
 779	if (!IS_ERR_OR_NULL(ah)) {
 780		/*
 781		 * pathrec.dgid is used as the database key from the LLADDR,
 782		 * it must remain unchanged even if the SA returns a different
 783		 * GID to use in the AH.
 784		 */
 785		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
 786			   sizeof(union ib_gid))) {
 787			ipoib_dbg(
 788				priv,
 789				"%s got PathRec for gid %pI6 while asked for %pI6\n",
 790				dev->name, pathrec->dgid.raw,
 791				path->pathrec.dgid.raw);
 792			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
 793			       sizeof(union ib_gid));
 794		}
 795
 796		path->pathrec = *pathrec;
 797
 798		old_ah   = path->ah;
 799		path->ah = ah;
 800
 801		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
 802			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
 803			  pathrec->sl);
 804
 805		while ((skb = __skb_dequeue(&path->queue)))
 806			__skb_queue_tail(&skqueue, skb);
 807
 808		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
 809			if (neigh->ah) {
 810				WARN_ON(neigh->ah != old_ah);
 811				/*
 812				 * Dropping the ah reference inside
 813				 * priv->lock is safe here, because we
 814				 * will hold one more reference from
 815				 * the original value of path->ah (ie
 816				 * old_ah).
 817				 */
 818				ipoib_put_ah(neigh->ah);
 819			}
 820			kref_get(&path->ah->ref);
 821			neigh->ah = path->ah;
 822
 823			if (ipoib_cm_enabled(dev, neigh->daddr)) {
 824				if (!ipoib_cm_get(neigh))
 825					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
 826									       path,
 827									       neigh));
 828				if (!ipoib_cm_get(neigh)) {
 829					ipoib_neigh_free(neigh);
 830					continue;
 831				}
 832			}
 833
 834			while ((skb = __skb_dequeue(&neigh->queue)))
 835				__skb_queue_tail(&skqueue, skb);
 836		}
 837		path->ah->valid = 1;
 838	}
 839
 840	path->query = NULL;
 841	complete(&path->done);
 842
 843	spin_unlock_irqrestore(&priv->lock, flags);
 844
 845	if (IS_ERR_OR_NULL(ah))
 846		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
 847
 848	if (old_ah)
 849		ipoib_put_ah(old_ah);
 850
 851	while ((skb = __skb_dequeue(&skqueue))) {
 852		int ret;
 853		skb->dev = dev;
 854		ret = dev_queue_xmit(skb);
 855		if (ret)
 856			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
 857				   __func__, ret);
 858	}
 859}
 860
 861static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
 862			  void *gid)
 863{
 864	path->dev = priv->dev;
 865
 866	if (rdma_cap_opa_ah(priv->ca, priv->port))
 867		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
 868	else
 869		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
 870
 871	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
 872	path->pathrec.sgid	    = priv->local_gid;
 873	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
 874	path->pathrec.numb_path     = 1;
 875	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
 876}
 877
 878static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
 879{
 880	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 881	struct ipoib_path *path;
 882
 883	if (!priv->broadcast)
 884		return NULL;
 885
 886	path = kzalloc(sizeof(*path), GFP_ATOMIC);
 887	if (!path)
 888		return NULL;
 889
 890	skb_queue_head_init(&path->queue);
 891
 892	INIT_LIST_HEAD(&path->neigh_list);
 893
 894	init_path_rec(priv, path, gid);
 895
 896	return path;
 897}
 898
 899static int path_rec_start(struct net_device *dev,
 900			  struct ipoib_path *path)
 901{
 902	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 903
 904	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
 905		  path->pathrec.dgid.raw);
 906
 907	init_completion(&path->done);
 908
 909	path->query_id =
 910		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
 911				   &path->pathrec,
 912				   IB_SA_PATH_REC_DGID		|
 913				   IB_SA_PATH_REC_SGID		|
 914				   IB_SA_PATH_REC_NUMB_PATH	|
 915				   IB_SA_PATH_REC_TRAFFIC_CLASS |
 916				   IB_SA_PATH_REC_PKEY,
 917				   1000, GFP_ATOMIC,
 918				   path_rec_completion,
 919				   path, &path->query);
 920	if (path->query_id < 0) {
 921		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
 922		path->query = NULL;
 923		complete(&path->done);
 924		return path->query_id;
 925	}
 926
 927	return 0;
 928}
 929
 930static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
 931			       struct net_device *dev)
 932{
 933	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 934	struct ipoib_path *path;
 935	unsigned long flags;
 936
 937	spin_lock_irqsave(&priv->lock, flags);
 938
 939	path = __path_find(dev, daddr + 4);
 940	if (!path)
 941		goto out;
 942	if (!path->query)
 943		path_rec_start(dev, path);
 944out:
 945	spin_unlock_irqrestore(&priv->lock, flags);
 946}
 947
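    /*
     * Look up or create the neighbour for daddr, tie it to the path for its
     * GID (starting a path record query if needed), and either send the skb
     * immediately, queue it on the neighbour, or drop it if the queue is full.
     */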
 948static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
 949					  struct net_device *dev)
 950{
 951	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 952	struct rdma_netdev *rn = netdev_priv(dev);
 953	struct ipoib_path *path;
 954	struct ipoib_neigh *neigh;
 955	unsigned long flags;
 956
 957	spin_lock_irqsave(&priv->lock, flags);
 958	neigh = ipoib_neigh_alloc(daddr, dev);
 959	if (!neigh) {
 960		spin_unlock_irqrestore(&priv->lock, flags);
 961		++dev->stats.tx_dropped;
 962		dev_kfree_skb_any(skb);
 963		return NULL;
 964	}
 965
 966	/* To avoid a race condition, make sure that the
 967	 * neigh will be added only once.
 968	 */
 969	if (unlikely(!list_empty(&neigh->list))) {
 970		spin_unlock_irqrestore(&priv->lock, flags);
 971		return neigh;
 972	}
 973
 974	path = __path_find(dev, daddr + 4);
 975	if (!path) {
 976		path = path_rec_create(dev, daddr + 4);
 977		if (!path)
 978			goto err_path;
 979
 980		__path_add(dev, path);
 981	}
 982
 983	list_add_tail(&neigh->list, &path->neigh_list);
 984
 985	if (path->ah && path->ah->valid) {
 986		kref_get(&path->ah->ref);
 987		neigh->ah = path->ah;
 988
 989		if (ipoib_cm_enabled(dev, neigh->daddr)) {
 990			if (!ipoib_cm_get(neigh))
 991				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
 992			if (!ipoib_cm_get(neigh)) {
 993				ipoib_neigh_free(neigh);
 994				goto err_drop;
 995			}
 996			if (skb_queue_len(&neigh->queue) <
 997			    IPOIB_MAX_PATH_REC_QUEUE) {
 998				push_pseudo_header(skb, neigh->daddr);
 999				__skb_queue_tail(&neigh->queue, skb);
1000			} else {
1001				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
1002					   skb_queue_len(&neigh->queue));
1003				goto err_drop;
1004			}
1005		} else {
1006			spin_unlock_irqrestore(&priv->lock, flags);
1007			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1008						       IPOIB_QPN(daddr));
1009			ipoib_neigh_put(neigh);
1010			return NULL;
1011		}
1012	} else {
1013		neigh->ah  = NULL;
1014
1015		if (!path->query && path_rec_start(dev, path))
1016			goto err_path;
1017		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1018			push_pseudo_header(skb, neigh->daddr);
1019			__skb_queue_tail(&neigh->queue, skb);
1020		} else {
1021			goto err_drop;
1022		}
1023	}
1024
1025	spin_unlock_irqrestore(&priv->lock, flags);
1026	ipoib_neigh_put(neigh);
1027	return NULL;
1028
1029err_path:
1030	ipoib_neigh_free(neigh);
1031err_drop:
1032	++dev->stats.tx_dropped;
1033	dev_kfree_skb_any(skb);
1034
1035	spin_unlock_irqrestore(&priv->lock, flags);
1036	ipoib_neigh_put(neigh);
1037
1038	return NULL;
1039}
1040
1041static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1042			     struct ipoib_pseudo_header *phdr)
1043{
1044	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1045	struct rdma_netdev *rn = netdev_priv(dev);
1046	struct ipoib_path *path;
1047	unsigned long flags;
1048
1049	spin_lock_irqsave(&priv->lock, flags);
1050
1051	/* no broadcast means that all paths are (or will become) invalid */
1052	if (!priv->broadcast)
1053		goto drop_and_unlock;
1054
1055	path = __path_find(dev, phdr->hwaddr + 4);
1056	if (!path || !path->ah || !path->ah->valid) {
1057		if (!path) {
1058			path = path_rec_create(dev, phdr->hwaddr + 4);
1059			if (!path)
1060				goto drop_and_unlock;
1061			__path_add(dev, path);
1062		} else {
1063			/*
1064			 * make sure there are no changes in the existing
1065			 * path record
1066			 */
1067			init_path_rec(priv, path, phdr->hwaddr + 4);
1068		}
1069		if (!path->query && path_rec_start(dev, path)) {
1070			goto drop_and_unlock;
1071		}
1072
1073		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1074			push_pseudo_header(skb, phdr->hwaddr);
1075			__skb_queue_tail(&path->queue, skb);
1076			goto unlock;
1077		} else {
1078			goto drop_and_unlock;
1079		}
1080	}
1081
1082	spin_unlock_irqrestore(&priv->lock, flags);
1083	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1084		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1085	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1086				       IPOIB_QPN(phdr->hwaddr));
1087	return;
1088
1089drop_and_unlock:
1090	++dev->stats.tx_dropped;
1091	dev_kfree_skb_any(skb);
1092unlock:
1093	spin_unlock_irqrestore(&priv->lock, flags);
1094}
1095
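    /*
     * Transmit entry point.  The pseudo header pushed by ipoib_hard_header()
     * carries the destination hardware address: multicast frames go through
     * the multicast path, unicast IP/IPv6/TIPC use the neighbour cache, and
     * ARP/RARP always trigger a path record lookup.
     */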
1096static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1097{
1098	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1099	struct rdma_netdev *rn = netdev_priv(dev);
1100	struct ipoib_neigh *neigh;
1101	struct ipoib_pseudo_header *phdr;
1102	struct ipoib_header *header;
1103	unsigned long flags;
1104
1105	phdr = (struct ipoib_pseudo_header *) skb->data;
1106	skb_pull(skb, sizeof(*phdr));
1107	header = (struct ipoib_header *) skb->data;
1108
1109	if (unlikely(phdr->hwaddr[4] == 0xff)) {
1110		/* multicast, arrange "if" according to probability */
1111		if ((header->proto != htons(ETH_P_IP)) &&
1112		    (header->proto != htons(ETH_P_IPV6)) &&
1113		    (header->proto != htons(ETH_P_ARP)) &&
1114		    (header->proto != htons(ETH_P_RARP)) &&
1115		    (header->proto != htons(ETH_P_TIPC))) {
1116			/* ethertype not supported by IPoIB */
1117			++dev->stats.tx_dropped;
1118			dev_kfree_skb_any(skb);
1119			return NETDEV_TX_OK;
1120		}
1121		/* Add in the P_Key for multicast */
1122		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1123		phdr->hwaddr[9] = priv->pkey & 0xff;
1124
1125		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1126		if (likely(neigh))
1127			goto send_using_neigh;
1128		ipoib_mcast_send(dev, phdr->hwaddr, skb);
1129		return NETDEV_TX_OK;
1130	}
1131
1132	/* unicast, arrange "switch" according to probability */
1133	switch (header->proto) {
1134	case htons(ETH_P_IP):
1135	case htons(ETH_P_IPV6):
1136	case htons(ETH_P_TIPC):
1137		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1138		if (unlikely(!neigh)) {
1139			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1140			if (likely(!neigh))
1141				return NETDEV_TX_OK;
1142		}
1143		break;
1144	case htons(ETH_P_ARP):
1145	case htons(ETH_P_RARP):
1146		/* for unicast ARP and RARP, always perform a path record lookup */
1147		unicast_arp_send(skb, dev, phdr);
1148		return NETDEV_TX_OK;
1149	default:
1150		/* ethertype not supported by IPoIB */
1151		++dev->stats.tx_dropped;
1152		dev_kfree_skb_any(skb);
1153		return NETDEV_TX_OK;
1154	}
1155
1156send_using_neigh:
1157	/* note we now hold a ref to neigh */
1158	if (ipoib_cm_get(neigh)) {
1159		if (ipoib_cm_up(neigh)) {
1160			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1161			goto unref;
1162		}
1163	} else if (neigh->ah && neigh->ah->valid) {
1164		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1165						IPOIB_QPN(phdr->hwaddr));
1166		goto unref;
1167	} else if (neigh->ah) {
1168		neigh_refresh_path(neigh, phdr->hwaddr, dev);
1169	}
1170
1171	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1172		push_pseudo_header(skb, phdr->hwaddr);
1173		spin_lock_irqsave(&priv->lock, flags);
1174		__skb_queue_tail(&neigh->queue, skb);
1175		spin_unlock_irqrestore(&priv->lock, flags);
1176	} else {
1177		++dev->stats.tx_dropped;
1178		dev_kfree_skb_any(skb);
1179	}
1180
1181unref:
1182	ipoib_neigh_put(neigh);
1183
1184	return NETDEV_TX_OK;
1185}
1186
1187static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
1188{
1189	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1190	struct rdma_netdev *rn = netdev_priv(dev);
1191
1192	if (rn->tx_timeout) {
1193		rn->tx_timeout(dev, txqueue);
1194		return;
1195	}
1196	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1197		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1198	ipoib_warn(priv,
1199		   "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
1200		   netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
1201		   priv->global_tx_head, priv->global_tx_tail);
1202
1203
1204	schedule_work(&priv->tx_timeout_work);
1205}
1206
1207void ipoib_ib_tx_timeout_work(struct work_struct *work)
1208{
1209	struct ipoib_dev_priv *priv = container_of(work,
1210						   struct ipoib_dev_priv,
1211						   tx_timeout_work);
1212	int err;
1213
1214	rtnl_lock();
1215
1216	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1217		goto unlock;
1218
1219	ipoib_stop(priv->dev);
1220	err = ipoib_open(priv->dev);
1221	if (err) {
1222		ipoib_warn(priv, "ipoib_open failed recovering from a tx_timeout, err(%d).\n",
1223				err);
1224		goto unlock;
1225	}
1226
1227	netif_tx_wake_all_queues(priv->dev);
1228unlock:
1229	rtnl_unlock();
1230
1231}
1232
1233static int ipoib_hard_header(struct sk_buff *skb,
1234			     struct net_device *dev,
1235			     unsigned short type,
1236			     const void *daddr,
1237			     const void *saddr,
1238			     unsigned int len)
1239{
1240	struct ipoib_header *header;
1241
1242	header = skb_push(skb, sizeof(*header));
1243
1244	header->proto = htons(type);
1245	header->reserved = 0;
1246
1247	/*
1248	 * we don't rely on the dst_entry structure; always stuff the
1249	 * destination address into the skb hard header so we can figure out
1250	 * where to send the packet later.
1251	 */
1252	push_pseudo_header(skb, daddr);
1253
1254	return IPOIB_HARD_LEN;
1255}
1256
1257static void ipoib_set_mcast_list(struct net_device *dev)
1258{
1259	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1260
1261	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1262		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1263		return;
1264	}
1265
1266	queue_work(priv->wq, &priv->restart_task);
1267}
1268
1269static int ipoib_get_iflink(const struct net_device *dev)
1270{
1271	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1272
1273	/* parent interface */
1274	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1275		return dev->ifindex;
1276
1277	/* child/vlan interface */
1278	return priv->parent->ifindex;
1279}
1280
1281static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
1282{
1283	/*
1284	 * Use only the address parts that contribute to spreading.
1285	 * The subnet prefix is not used, as one cannot connect to the
1286	 * same remote port (GUID) using the same remote QPN via two
1287	 * different subnets.
1288	 */
1289	 /* qpn octets[1:4) & port GUID octets[12:20) */
1290	u32 *d32 = (u32 *) daddr;
1291	u32 hv;
1292
1293	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1294	return hv & htbl->mask;
1295}
1296
1297struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1298{
1299	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1300	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1301	struct ipoib_neigh_hash *htbl;
1302	struct ipoib_neigh *neigh = NULL;
1303	u32 hash_val;
1304
1305	rcu_read_lock_bh();
1306
1307	htbl = rcu_dereference_bh(ntbl->htbl);
1308
1309	if (!htbl)
1310		goto out_unlock;
1311
1312	hash_val = ipoib_addr_hash(htbl, daddr);
1313	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1314	     neigh != NULL;
1315	     neigh = rcu_dereference_bh(neigh->hnext)) {
1316		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1317			/* found, take one ref on behalf of the caller */
1318			if (!refcount_inc_not_zero(&neigh->refcnt)) {
1319				/* deleted */
1320				neigh = NULL;
1321				goto out_unlock;
1322			}
1323
1324			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1325				neigh->alive = jiffies;
1326			goto out_unlock;
1327		}
1328	}
1329
1330out_unlock:
1331	rcu_read_unlock_bh();
1332	return neigh;
1333}
1334
1335static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1336{
1337	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1338	struct ipoib_neigh_hash *htbl;
1339	unsigned long neigh_obsolete;
1340	unsigned long dt;
1341	unsigned long flags;
1342	int i;
1343	LIST_HEAD(remove_list);
1344
1345	spin_lock_irqsave(&priv->lock, flags);
1346
1347	htbl = rcu_dereference_protected(ntbl->htbl,
1348					 lockdep_is_held(&priv->lock));
1349
1350	if (!htbl)
1351		goto out_unlock;
1352
1353	/* neigh is obsolete if it was idle for two GC periods */
1354	dt = 2 * arp_tbl.gc_interval;
1355	neigh_obsolete = jiffies - dt;
1356
1357	for (i = 0; i < htbl->size; i++) {
1358		struct ipoib_neigh *neigh;
1359		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1360
1361		while ((neigh = rcu_dereference_protected(*np,
1362							  lockdep_is_held(&priv->lock))) != NULL) {
1363			/* was the neigh idle for two GC periods */
1364			if (time_after(neigh_obsolete, neigh->alive)) {
1365
1366				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1367
1368				rcu_assign_pointer(*np,
1369						   rcu_dereference_protected(neigh->hnext,
1370									     lockdep_is_held(&priv->lock)));
1371				/* remove from path/mc list */
1372				list_del_init(&neigh->list);
1373				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1374			} else {
1375				np = &neigh->hnext;
1376			}
1377
1378		}
1379	}
1380
1381out_unlock:
1382	spin_unlock_irqrestore(&priv->lock, flags);
1383	ipoib_mcast_remove_list(&remove_list);
1384}
1385
1386static void ipoib_reap_neigh(struct work_struct *work)
1387{
1388	struct ipoib_dev_priv *priv =
1389		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1390
1391	__ipoib_reap_neigh(priv);
1392
1393	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1394			   arp_tbl.gc_interval);
1395}
1396
1397
1398static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1399				      struct net_device *dev)
1400{
1401	struct ipoib_neigh *neigh;
1402
1403	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
1404	if (!neigh)
1405		return NULL;
1406
1407	neigh->dev = dev;
1408	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1409	skb_queue_head_init(&neigh->queue);
1410	INIT_LIST_HEAD(&neigh->list);
1411	ipoib_cm_set(neigh, NULL);
1412	/* one ref on behalf of the caller */
1413	refcount_set(&neigh->refcnt, 1);
1414
1415	return neigh;
1416}
1417
1418struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1419				      struct net_device *dev)
1420{
1421	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1422	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1423	struct ipoib_neigh_hash *htbl;
1424	struct ipoib_neigh *neigh;
1425	u32 hash_val;
1426
1427	htbl = rcu_dereference_protected(ntbl->htbl,
1428					 lockdep_is_held(&priv->lock));
1429	if (!htbl) {
1430		neigh = NULL;
1431		goto out_unlock;
1432	}
1433
1434	/* need to add a new neigh, but maybe some other thread succeeded?
1435	 * recalc the hash; a hash resize may have taken place, so do a search
1436	 */
1437	hash_val = ipoib_addr_hash(htbl, daddr);
1438	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1439					       lockdep_is_held(&priv->lock));
1440	     neigh != NULL;
1441	     neigh = rcu_dereference_protected(neigh->hnext,
1442					       lockdep_is_held(&priv->lock))) {
1443		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1444			/* found, take one ref on behalf of the caller */
1445			if (!refcount_inc_not_zero(&neigh->refcnt)) {
1446				/* deleted */
1447				neigh = NULL;
1448				break;
1449			}
1450			neigh->alive = jiffies;
1451			goto out_unlock;
1452		}
1453	}
1454
1455	neigh = ipoib_neigh_ctor(daddr, dev);
1456	if (!neigh)
1457		goto out_unlock;
1458
1459	/* one ref on behalf of the hash table */
1460	refcount_inc(&neigh->refcnt);
1461	neigh->alive = jiffies;
1462	/* put in hash */
1463	rcu_assign_pointer(neigh->hnext,
1464			   rcu_dereference_protected(htbl->buckets[hash_val],
1465						     lockdep_is_held(&priv->lock)));
1466	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1467	atomic_inc(&ntbl->entries);
1468
1469out_unlock:
1470
1471	return neigh;
1472}
1473
1474void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1475{
1476	/* neigh reference count was dropped to zero */
1477	struct net_device *dev = neigh->dev;
1478	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1479	struct sk_buff *skb;
1480	if (neigh->ah)
1481		ipoib_put_ah(neigh->ah);
1482	while ((skb = __skb_dequeue(&neigh->queue))) {
1483		++dev->stats.tx_dropped;
1484		dev_kfree_skb_any(skb);
1485	}
1486	if (ipoib_cm_get(neigh))
1487		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1488	ipoib_dbg(ipoib_priv(dev),
1489		  "neigh free for %06x %pI6\n",
1490		  IPOIB_QPN(neigh->daddr),
1491		  neigh->daddr + 4);
1492	kfree(neigh);
1493	if (atomic_dec_and_test(&priv->ntbl.entries)) {
1494		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1495			complete(&priv->ntbl.flushed);
1496	}
1497}
1498
1499static void ipoib_neigh_reclaim(struct rcu_head *rp)
1500{
1501	/* Called as a result of removal from hash table */
1502	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1503	/* note TX context may hold another ref */
1504	ipoib_neigh_put(neigh);
1505}
1506
1507void ipoib_neigh_free(struct ipoib_neigh *neigh)
1508{
1509	struct net_device *dev = neigh->dev;
1510	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1511	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1512	struct ipoib_neigh_hash *htbl;
1513	struct ipoib_neigh __rcu **np;
1514	struct ipoib_neigh *n;
1515	u32 hash_val;
1516
1517	htbl = rcu_dereference_protected(ntbl->htbl,
1518					lockdep_is_held(&priv->lock));
1519	if (!htbl)
1520		return;
1521
1522	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1523	np = &htbl->buckets[hash_val];
1524	for (n = rcu_dereference_protected(*np,
1525					    lockdep_is_held(&priv->lock));
1526	     n != NULL;
1527	     n = rcu_dereference_protected(*np,
1528					lockdep_is_held(&priv->lock))) {
1529		if (n == neigh) {
1530			/* found */
1531			rcu_assign_pointer(*np,
1532					   rcu_dereference_protected(neigh->hnext,
1533								     lockdep_is_held(&priv->lock)));
1534			/* remove from parent list */
1535			list_del_init(&neigh->list);
1536			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1537			return;
1538		} else {
1539			np = &n->hnext;
1540		}
1541	}
1542}
1543
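    /*
     * The neighbour hash table is sized from the core ARP table's gc_thresh3,
     * rounded up to a power of two so bucket selection can use a mask; the
     * reaper task reuses arp_tbl.gc_interval as its garbage collection period.
     */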
1544static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1545{
1546	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1547	struct ipoib_neigh_hash *htbl;
1548	struct ipoib_neigh __rcu **buckets;
1549	u32 size;
1550
1551	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1552	ntbl->htbl = NULL;
1553	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1554	if (!htbl)
1555		return -ENOMEM;
1556	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1557	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1558	if (!buckets) {
1559		kfree(htbl);
1560		return -ENOMEM;
1561	}
1562	htbl->size = size;
1563	htbl->mask = (size - 1);
1564	htbl->buckets = buckets;
1565	RCU_INIT_POINTER(ntbl->htbl, htbl);
1566	htbl->ntbl = ntbl;
1567	atomic_set(&ntbl->entries, 0);
1568
1569	/* start garbage collection */
1570	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1571			   arp_tbl.gc_interval);
1572
1573	return 0;
1574}
1575
1576static void neigh_hash_free_rcu(struct rcu_head *head)
1577{
1578	struct ipoib_neigh_hash *htbl = container_of(head,
1579						    struct ipoib_neigh_hash,
1580						    rcu);
1581	struct ipoib_neigh __rcu **buckets = htbl->buckets;
1582	struct ipoib_neigh_table *ntbl = htbl->ntbl;
1583
1584	kvfree(buckets);
1585	kfree(htbl);
1586	complete(&ntbl->deleted);
1587}
1588
1589void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1590{
1591	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1592	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1593	struct ipoib_neigh_hash *htbl;
1594	unsigned long flags;
1595	int i;
1596
1597	/* remove all neigh connected to a given path or mcast */
1598	spin_lock_irqsave(&priv->lock, flags);
1599
1600	htbl = rcu_dereference_protected(ntbl->htbl,
1601					 lockdep_is_held(&priv->lock));
1602
1603	if (!htbl)
1604		goto out_unlock;
1605
1606	for (i = 0; i < htbl->size; i++) {
1607		struct ipoib_neigh *neigh;
1608		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1609
1610		while ((neigh = rcu_dereference_protected(*np,
1611							  lockdep_is_held(&priv->lock))) != NULL) {
1612			/* delete neighs belonging to this parent */
1613			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1614				rcu_assign_pointer(*np,
1615						   rcu_dereference_protected(neigh->hnext,
1616									     lockdep_is_held(&priv->lock)));
1617				/* remove from parent list */
1618				list_del_init(&neigh->list);
1619				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1620			} else {
1621				np = &neigh->hnext;
1622			}
1623
1624		}
1625	}
1626out_unlock:
1627	spin_unlock_irqrestore(&priv->lock, flags);
1628}
1629
1630static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1631{
1632	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1633	struct ipoib_neigh_hash *htbl;
1634	unsigned long flags;
1635	int i, wait_flushed = 0;
1636
1637	init_completion(&priv->ntbl.flushed);
1638	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1639
1640	spin_lock_irqsave(&priv->lock, flags);
1641
1642	htbl = rcu_dereference_protected(ntbl->htbl,
1643					lockdep_is_held(&priv->lock));
1644	if (!htbl)
1645		goto out_unlock;
1646
1647	wait_flushed = atomic_read(&priv->ntbl.entries);
1648	if (!wait_flushed)
1649		goto free_htbl;
1650
1651	for (i = 0; i < htbl->size; i++) {
1652		struct ipoib_neigh *neigh;
1653		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1654
1655		while ((neigh = rcu_dereference_protected(*np,
1656				       lockdep_is_held(&priv->lock))) != NULL) {
1657			rcu_assign_pointer(*np,
1658					   rcu_dereference_protected(neigh->hnext,
1659								     lockdep_is_held(&priv->lock)));
1660			/* remove from path/mc list */
1661			list_del_init(&neigh->list);
1662			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1663		}
1664	}
1665
1666free_htbl:
1667	rcu_assign_pointer(ntbl->htbl, NULL);
1668	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1669
1670out_unlock:
1671	spin_unlock_irqrestore(&priv->lock, flags);
1672	if (wait_flushed)
1673		wait_for_completion(&priv->ntbl.flushed);
1674}
1675
1676static void ipoib_neigh_hash_uninit(struct net_device *dev)
1677{
1678	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1679
1680	ipoib_dbg(priv, "%s\n", __func__);
1681	init_completion(&priv->ntbl.deleted);
1682
1683	cancel_delayed_work_sync(&priv->neigh_reap_task);
1684
1685	ipoib_flush_neighs(priv);
1686
1687	wait_for_completion(&priv->ntbl.deleted);
1688}
1689
1690static void ipoib_napi_add(struct net_device *dev)
1691{
1692	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1693
1694	netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
1695			      IPOIB_NUM_WC);
1696	netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
1697			      MAX_SEND_CQE);
1698}
1699
1700static void ipoib_napi_del(struct net_device *dev)
1701{
1702	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1703
1704	netif_napi_del(&priv->recv_napi);
1705	netif_napi_del(&priv->send_napi);
1706}
1707
1708static void ipoib_dev_uninit_default(struct net_device *dev)
1709{
1710	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1711
1712	ipoib_transport_dev_cleanup(dev);
1713
1714	ipoib_napi_del(dev);
1715
1716	ipoib_cm_dev_cleanup(dev);
1717
1718	kfree(priv->rx_ring);
1719	vfree(priv->tx_ring);
1720
1721	priv->rx_ring = NULL;
1722	priv->tx_ring = NULL;
1723}
1724
1725static int ipoib_dev_init_default(struct net_device *dev)
1726{
1727	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1728	u8 addr_mod[3];
1729
1730	ipoib_napi_add(dev);
1731
1732	/* Allocate RX/TX "rings" to hold queued skbs */
1733	priv->rx_ring =	kcalloc(ipoib_recvq_size,
1734				       sizeof(*priv->rx_ring),
1735				       GFP_KERNEL);
1736	if (!priv->rx_ring)
1737		goto out;
1738
1739	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
1740					   sizeof(*priv->tx_ring)));
1741	if (!priv->tx_ring) {
1742		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
1743			priv->ca->name, ipoib_sendq_size);
1744		goto out_rx_ring_cleanup;
1745	}
1746
1747	/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
1748
1749	if (ipoib_transport_dev_init(dev, priv->ca)) {
1750		pr_warn("%s: ipoib_transport_dev_init failed\n",
1751			priv->ca->name);
1752		goto out_tx_ring_cleanup;
1753	}
1754
1755	/* after the QP is created, set the dev address */
1756	addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
1757	addr_mod[1] = (priv->qp->qp_num >>  8) & 0xff;
1758	addr_mod[2] = (priv->qp->qp_num) & 0xff;
1759	dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
1760
1761	return 0;
1762
1763out_tx_ring_cleanup:
1764	vfree(priv->tx_ring);
1765
1766out_rx_ring_cleanup:
1767	kfree(priv->rx_ring);
1768
1769out:
1770	ipoib_napi_del(dev);
1771	return -ENOMEM;
1772}
1773
1774static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
1775		       int cmd)
1776{
1777	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1778
1779	if (!priv->rn_ops->ndo_eth_ioctl)
1780		return -EOPNOTSUPP;
1781
1782	return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
1783}
1784
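    /*
     * Per-device initialization: allocate the ordered workqueue and PD, let
     * the lower rdma_netdev set up its hardware resources, and initialize the
     * neighbour hash table; errors unwind in reverse order.
     */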
1785static int ipoib_dev_init(struct net_device *dev)
1786{
1787	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1788	int ret = -ENOMEM;
1789
1790	priv->qp = NULL;
1791
1792	/*
1793	 * the various IPoIB tasks assume they will never race against
1794	 * themselves, so always use a single thread workqueue
1795	 */
1796	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1797	if (!priv->wq) {
1798		pr_warn("%s: failed to allocate device WQ\n", dev->name);
1799		goto out;
1800	}
1801
1802	/* create the PD, which is used for both control and data path */
1803	priv->pd = ib_alloc_pd(priv->ca, 0);
1804	if (IS_ERR(priv->pd)) {
1805		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
1806		goto clean_wq;
1807	}
1808
1809	ret = priv->rn_ops->ndo_init(dev);
1810	if (ret) {
1811		pr_warn("%s failed to init HW resource\n", dev->name);
1812		goto out_free_pd;
1813	}
1814
1815	ret = ipoib_neigh_hash_init(priv);
1816	if (ret) {
1817		pr_warn("%s failed to init neigh hash\n", dev->name);
1818		goto out_dev_uninit;
1819	}
1820
1821	if (dev->flags & IFF_UP) {
1822		if (ipoib_ib_dev_open(dev)) {
1823			pr_warn("%s failed to open device\n", dev->name);
1824			ret = -ENODEV;
1825			goto out_hash_uninit;
1826		}
1827	}
1828
1829	return 0;
1830
1831out_hash_uninit:
1832	ipoib_neigh_hash_uninit(dev);
1833
1834out_dev_uninit:
1835	ipoib_ib_dev_cleanup(dev);
1836
1837out_free_pd:
1838	if (priv->pd) {
1839		ib_dealloc_pd(priv->pd);
1840		priv->pd = NULL;
1841	}
1842
1843clean_wq:
1844	if (priv->wq) {
1845		destroy_workqueue(priv->wq);
1846		priv->wq = NULL;
1847	}
1848
1849out:
1850	return ret;
1851}
1852
1853/*
1854 * This must be called before doing an unregister_netdev on a parent device to
1855 * shutdown the IB event handler.
1856 */
1857static void ipoib_parent_unregister_pre(struct net_device *ndev)
1858{
1859	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1860
1861	/*
1862	 * ipoib_set_mac checks netif_running before pushing work; clearing
1863	 * running ensures it will not add more work.
1864	 */
1865	rtnl_lock();
1866	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
1867	rtnl_unlock();
1868
1869	/* ipoib_event() cannot be running once this returns */
1870	ib_unregister_event_handler(&priv->event_handler);
1871
1872	/*
1873	 * Work on the queue grabs the rtnl lock, so this cannot be done while
1874	 * also holding it.
1875	 */
1876	flush_workqueue(ipoib_workqueue);
1877}
1878
1879static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
1880{
1881	priv->hca_caps = priv->ca->attrs.device_cap_flags;
1882	priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
1883
1884	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1885		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1886
1887		if (priv->kernel_caps & IBK_UD_TSO)
1888			priv->dev->hw_features |= NETIF_F_TSO;
1889
1890		priv->dev->features |= priv->dev->hw_features;
1891	}
1892}
1893
1894static int ipoib_parent_init(struct net_device *ndev)
1895{
1896	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1897	struct ib_port_attr attr;
1898	int result;
1899
1900	result = ib_query_port(priv->ca, priv->port, &attr);
1901	if (result) {
1902		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
1903			priv->port);
1904		return result;
1905	}
1906	priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
1907
1908	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1909	if (result) {
1910		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
1911			priv->ca->name, priv->port, result);
1912		return result;
1913	}
1914
1915	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
1916	if (result) {
1917		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
1918			priv->ca->name, priv->port, result);
1919		return result;
1920	}
1921	dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
1922
1923	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
1924	priv->dev->dev_port = priv->port - 1;
1925	/* Let's set this one too for backwards compatibility. */
1926	priv->dev->dev_id = priv->port - 1;
1927
1928	return 0;
1929}
1930
1931static void ipoib_child_init(struct net_device *ndev)
1932{
1933	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1934	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1935
1936	priv->max_ib_mtu = ppriv->max_ib_mtu;
1937	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
1938	if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
1939		memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
1940		       sizeof(priv->local_gid));
1941	else {
1942		__dev_addr_set(priv->dev, ppriv->dev->dev_addr,
1943			       INFINIBAND_ALEN);
1944		memcpy(&priv->local_gid, &ppriv->local_gid,
1945		       sizeof(priv->local_gid));
1946	}
1947}
1948
1949static int ipoib_ndo_init(struct net_device *ndev)
1950{
1951	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1952	int rc;
1953	struct rdma_netdev *rn = netdev_priv(ndev);
1954
1955	if (priv->parent) {
1956		ipoib_child_init(ndev);
1957	} else {
1958		rc = ipoib_parent_init(ndev);
1959		if (rc)
1960			return rc;
1961	}
1962
1963	/* MTU will be reset when mcast join happens */
1964	ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1965	priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
1966	rn->mtu = priv->mcast_mtu;
1967	ndev->max_mtu = IPOIB_CM_MTU;
1968
1969	ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
1970
1971	/*
1972	 * Set the full membership bit, so that we join the right
1973	 * broadcast group, etc.
1974	 */
1975	priv->pkey |= 0x8000;
1976
1977	ndev->broadcast[8] = priv->pkey >> 8;
1978	ndev->broadcast[9] = priv->pkey & 0xff;
1979	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1980
1981	ipoib_set_dev_features(priv);
1982
1983	rc = ipoib_dev_init(ndev);
1984	if (rc) {
1985		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
1986			priv->ca->name, priv->dev->name, priv->port, rc);
1987		return rc;
1988	}
1989
1990	if (priv->parent) {
1991		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1992
1993		dev_hold(priv->parent);
1994
1995		down_write(&ppriv->vlan_rwsem);
1996		list_add_tail(&priv->list, &ppriv->child_intfs);
1997		up_write(&ppriv->vlan_rwsem);
1998	}
1999
2000	return 0;
2001}
2002
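/*
 * ndo_uninit, called from unregister_netdevice() with the RTNL held:
 * unlink a child from its parent, tear down the neighbour table and IB
 * resources, destroy the per-device workqueue and drop the parent
 * reference taken in ipoib_ndo_init().
 */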
2003static void ipoib_ndo_uninit(struct net_device *dev)
2004{
2005	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2006
2007	ASSERT_RTNL();
2008
2009	/*
2010	 * ipoib_remove_one guarantees the children are removed before the
2011	 * parent, and that is the only place where a parent can be removed.
2012	 */
2013	WARN_ON(!list_empty(&priv->child_intfs));
2014
2015	if (priv->parent) {
2016		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
2017
2018		down_write(&ppriv->vlan_rwsem);
2019		list_del(&priv->list);
2020		up_write(&ppriv->vlan_rwsem);
2021	}
2022
2023	ipoib_neigh_hash_uninit(dev);
2024
2025	ipoib_ib_dev_cleanup(dev);
2026
2027	/* no more work can be queued on priv->wq */
2028	if (priv->wq) {
2029		/* See ipoib_mcast_carrier_on_task() */
2030		WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
2031		destroy_workqueue(priv->wq);
2032		priv->wq = NULL;
2033	}
2034
2035	dev_put(priv->parent);
2036}
2037
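/* SR-IOV VF management hooks: thin wrappers around the ib_core VF API. */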
2038static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2039{
2040	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2041
2042	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
2043}
2044
2045static int ipoib_get_vf_config(struct net_device *dev, int vf,
2046			       struct ifla_vf_info *ivf)
2047{
2048	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2049	int err;
2050
2051	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
2052	if (err)
2053		return err;
2054
2055	ivf->vf = vf;
2056	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
2057
2058	return 0;
2059}
2060
2061static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
2062{
2063	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2064
2065	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
2066		return -EINVAL;
2067
2068	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
2069}
2070
2071static int ipoib_get_vf_guid(struct net_device *dev, int vf,
2072			     struct ifla_vf_guid *node_guid,
2073			     struct ifla_vf_guid *port_guid)
2074{
2075	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2076
2077	return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
2078}
2079
2080static int ipoib_get_vf_stats(struct net_device *dev, int vf,
2081			      struct ifla_vf_stats *vf_stats)
2082{
2083	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2084
2085	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
2086}
2087
2088static const struct header_ops ipoib_header_ops = {
2089	.create	= ipoib_hard_header,
2090};
2091
2092static const struct net_device_ops ipoib_netdev_ops_pf = {
2093	.ndo_init		 = ipoib_ndo_init,
2094	.ndo_uninit		 = ipoib_ndo_uninit,
2095	.ndo_open		 = ipoib_open,
2096	.ndo_stop		 = ipoib_stop,
2097	.ndo_change_mtu		 = ipoib_change_mtu,
2098	.ndo_fix_features	 = ipoib_fix_features,
2099	.ndo_start_xmit		 = ipoib_start_xmit,
2100	.ndo_tx_timeout		 = ipoib_timeout,
2101	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2102	.ndo_get_iflink		 = ipoib_get_iflink,
2103	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
2104	.ndo_get_vf_config	 = ipoib_get_vf_config,
2105	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
2106	.ndo_get_vf_guid	 = ipoib_get_vf_guid,
2107	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
2108	.ndo_set_mac_address	 = ipoib_set_mac,
2109	.ndo_get_stats64	 = ipoib_get_stats,
2110	.ndo_eth_ioctl		 = ipoib_ioctl,
2111};
2112
2113static const struct net_device_ops ipoib_netdev_ops_vf = {
2114	.ndo_init		 = ipoib_ndo_init,
2115	.ndo_uninit		 = ipoib_ndo_uninit,
2116	.ndo_open		 = ipoib_open,
2117	.ndo_stop		 = ipoib_stop,
2118	.ndo_change_mtu		 = ipoib_change_mtu,
2119	.ndo_fix_features	 = ipoib_fix_features,
2120	.ndo_start_xmit		 = ipoib_start_xmit,
2121	.ndo_tx_timeout		 = ipoib_timeout,
2122	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2123	.ndo_get_iflink		 = ipoib_get_iflink,
2124	.ndo_get_stats64	 = ipoib_get_stats,
2125	.ndo_eth_ioctl		 = ipoib_ioctl,
2126};
2127
2128static const struct net_device_ops ipoib_netdev_default_pf = {
2129	.ndo_init		 = ipoib_dev_init_default,
2130	.ndo_uninit		 = ipoib_dev_uninit_default,
2131	.ndo_open		 = ipoib_ib_dev_open_default,
2132	.ndo_stop		 = ipoib_ib_dev_stop_default,
2133};
2134
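/*
 * Common netdev setup shared by parent and child interfaces: link type,
 * address/header lengths, broadcast address and baseline feature flags.
 * The IPoIB ndo ops are installed on top of these defaults later, in
 * ipoib_intf_init().
 */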
2135void ipoib_setup_common(struct net_device *dev)
2136{
2137	dev->header_ops		 = &ipoib_header_ops;
2138	dev->netdev_ops          = &ipoib_netdev_default_pf;
2139
2140	ipoib_set_ethtool_ops(dev);
2141
2142	dev->watchdog_timeo	 = 10 * HZ;
2143
2144	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
2145
2146	dev->hard_header_len	 = IPOIB_HARD_LEN;
2147	dev->addr_len		 = INFINIBAND_ALEN;
2148	dev->type		 = ARPHRD_INFINIBAND;
2149	dev->tx_queue_len	 = ipoib_sendq_size * 2;
2150	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
2151				    NETIF_F_HIGHDMA);
2152	netif_keep_dst(dev);
2153
2154	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
2155
2156	/*
2157	 * unregister_netdev always frees the netdev; we use this mode
2158	 * consistently to unify all the various unregister paths, including
2159	 * those connected to rtnl_link_ops which require it.
2160	 */
2161	dev->needs_free_netdev = true;
2162}
2163
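/*
 * Initialize the locks, lists and work items embedded in ipoib_dev_priv.
 * No hardware access happens here; that is deferred to ndo_init.
 */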
2164static void ipoib_build_priv(struct net_device *dev)
2165{
2166	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2167
2168	priv->dev = dev;
2169	spin_lock_init(&priv->lock);
2170	init_rwsem(&priv->vlan_rwsem);
2171	mutex_init(&priv->mcast_mutex);
2172
2173	INIT_LIST_HEAD(&priv->path_list);
2174	INIT_LIST_HEAD(&priv->child_intfs);
2175	INIT_LIST_HEAD(&priv->dead_ahs);
2176	INIT_LIST_HEAD(&priv->multicast_list);
2177
2178	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
2179	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
2180	INIT_WORK(&priv->reschedule_napi_work, ipoib_napi_schedule_work);
2181	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
2182	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
2183	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
2184	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
2185	INIT_WORK(&priv->tx_timeout_work, ipoib_ib_tx_timeout_work);
2186	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
2187	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
2188}
2189
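/*
 * Prefer an rdma_netdev provided by the HCA driver; if the driver does
 * not implement one (-EOPNOTSUPP), fall back to a plain netdev wrapping
 * the software rdma_netdev.
 */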
2190static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
2191					     const char *name)
2192{
2193	struct net_device *dev;
2194
2195	dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2196				NET_NAME_UNKNOWN, ipoib_setup_common);
2197	if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
2198		return dev;
2199
2200	dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
2201			   ipoib_setup_common);
2202	if (!dev)
2203		return ERR_PTR(-ENOMEM);
2204	return dev;
2205}
2206
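/*
 * Second-stage construction: allocate the IPoIB private data, let the HCA
 * initialize the rdma_netdev (installing the software send/mcast handlers
 * and single TX/RX queues when it returns -EOPNOTSUPP), save the lower
 * ndo ops in priv->rn_ops and install the IPoIB PF/VF ops on top.
 */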
2207int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
2208		    struct net_device *dev)
2209{
2210	struct rdma_netdev *rn = netdev_priv(dev);
2211	struct ipoib_dev_priv *priv;
2212	int rc;
2213
2214	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2215	if (!priv)
2216		return -ENOMEM;
2217
2218	priv->ca = hca;
2219	priv->port = port;
2220
2221	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2222			      NET_NAME_UNKNOWN, ipoib_setup_common, dev);
2223	if (rc) {
2224		if (rc != -EOPNOTSUPP)
2225			goto out;
2226
2227		rn->send = ipoib_send;
2228		rn->attach_mcast = ipoib_mcast_attach;
2229		rn->detach_mcast = ipoib_mcast_detach;
2230		rn->hca = hca;
2231
2232		rc = netif_set_real_num_tx_queues(dev, 1);
2233		if (rc)
2234			goto out;
2235
2236		rc = netif_set_real_num_rx_queues(dev, 1);
2237		if (rc)
2238			goto out;
2239	}
2240
2241	priv->rn_ops = dev->netdev_ops;
2242
2243	if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
2244		dev->netdev_ops	= &ipoib_netdev_ops_vf;
2245	else
2246		dev->netdev_ops	= &ipoib_netdev_ops_pf;
2247
2248	rn->clnt_priv = priv;
2249	/*
2250	 * Only the child register_netdev flows can handle priv_destructor
2251	 * being set, so we force it to NULL here and handle manually until it
2252	 * is safe to turn on.
2253	 */
2254	priv->next_priv_destructor = dev->priv_destructor;
2255	dev->priv_destructor = NULL;
2256
2257	ipoib_build_priv(dev);
2258
2259	return 0;
2260
2261out:
2262	kfree(priv);
2263	return rc;
2264}
2265
2266struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
2267				    const char *name)
2268{
2269	struct net_device *dev;
2270	int rc;
2271
2272	dev = ipoib_alloc_netdev(hca, port, name);
2273	if (IS_ERR(dev))
2274		return dev;
2275
2276	rc = ipoib_intf_init(hca, port, name, dev);
2277	if (rc) {
2278		free_netdev(dev);
2279		return ERR_PTR(rc);
2280	}
2281
2282	/*
2283	 * Upon success the caller must ensure that either ipoib_intf_free() is
2284	 * called or that register_netdevice() succeeded and priv_destructor is
2285	 * set to ipoib_intf_free.
2286	 */
2287	return dev;
2288}
2289
2290void ipoib_intf_free(struct net_device *dev)
2291{
2292	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2293	struct rdma_netdev *rn = netdev_priv(dev);
2294
2295	dev->priv_destructor = priv->next_priv_destructor;
2296	if (dev->priv_destructor)
2297		dev->priv_destructor(dev);
2298
2299	/*
2300	 * There are some error flows around register_netdev failing that may
2301	 * attempt to call priv_destructor twice; prevent that from happening.
2302	 */
2303	dev->priv_destructor = NULL;
2304
2305	/* unregister/destroy is very complicated. Make bugs more obvious. */
2306	rn->clnt_priv = NULL;
2307
2308	kfree(priv);
2309}
2310
2311static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2312			 char *buf)
2313{
2314	struct net_device *ndev = to_net_dev(dev);
2315	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2316
2317	return sysfs_emit(buf, "0x%04x\n", priv->pkey);
2318}
2319static DEVICE_ATTR_RO(pkey);
2320
2321static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
2322			   char *buf)
2323{
2324	struct net_device *ndev = to_net_dev(dev);
2325	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2326
2327	return sysfs_emit(buf, "%d\n",
2328			  test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
2329}
2330
2331void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
2332{
2333	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2334
2335	if (umcast_val > 0) {
2336		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2337		ipoib_warn(priv, "ignoring multicast groups joined directly "
2338				"by userspace\n");
2339	} else
2340		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2341}
2342
2343static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
2344			    const char *buf, size_t count)
2345{
2346	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
2347
2348	ipoib_set_umcast(to_net_dev(dev), umcast_val);
2349
2350	return count;
2351}
2352static DEVICE_ATTR_RW(umcast);
2353
2354int ipoib_add_umcast_attr(struct net_device *dev)
2355{
2356	return device_create_file(&dev->dev, &dev_attr_umcast);
2357}
2358
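/*
 * Apply a new GUID (the interface_id part of the GID) to this interface's
 * device address; for a parent interface the change is propagated
 * recursively to all of its children.
 */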
2359static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
2360{
2361	struct ipoib_dev_priv *child_priv;
2362	struct net_device *netdev = priv->dev;
2363
2364	netif_addr_lock_bh(netdev);
2365
2366	memcpy(&priv->local_gid.global.interface_id,
2367	       &gid->global.interface_id,
2368	       sizeof(gid->global.interface_id));
2369	dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
2370	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2371
2372	netif_addr_unlock_bh(netdev);
2373
2374	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
2375		down_read(&priv->vlan_rwsem);
2376		list_for_each_entry(child_priv, &priv->child_intfs, list)
2377			set_base_guid(child_priv, gid);
2378		up_read(&priv->vlan_rwsem);
2379	}
2380}
2381
2382static int ipoib_check_lladdr(struct net_device *dev,
2383			      struct sockaddr_storage *ss)
2384{
2385	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
2386	int ret = 0;
2387
2388	netif_addr_lock_bh(dev);
2389
2390	/* Make sure the QPN, reserved and subnet prefix match the current
2391	 * lladdr; this also makes sure the lladdr is unicast.
2392	 */
2393	if (memcmp(dev->dev_addr, ss->__data,
2394		   4 + sizeof(gid->global.subnet_prefix)) ||
2395	    gid->global.interface_id == 0)
2396		ret = -EINVAL;
2397
2398	netif_addr_unlock_bh(dev);
2399
2400	return ret;
2401}
2402
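/*
 * ndo_set_mac_address: only the GUID portion of the lladdr may change.
 * After validating the address, apply the new GUID and queue a light
 * flush on the global workqueue so the change takes effect.
 */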
2403static int ipoib_set_mac(struct net_device *dev, void *addr)
2404{
2405	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2406	struct sockaddr_storage *ss = addr;
2407	int ret;
2408
2409	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
2410		return -EBUSY;
2411
2412	ret = ipoib_check_lladdr(dev, ss);
2413	if (ret)
2414		return ret;
2415
2416	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
2417
2418	queue_work(ipoib_workqueue, &priv->flush_light);
2419
2420	return 0;
2421}
2422
2423static ssize_t create_child_store(struct device *dev,
2424				  struct device_attribute *attr,
2425				  const char *buf, size_t count)
2426{
2427	int pkey;
2428	int ret;
2429
2430	if (sscanf(buf, "%i", &pkey) != 1)
2431		return -EINVAL;
2432
2433	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
2434		return -EINVAL;
2435
2436	ret = ipoib_vlan_add(to_net_dev(dev), pkey);
2437
2438	return ret ? ret : count;
2439}
2440static DEVICE_ATTR_WO(create_child);
2441
2442static ssize_t delete_child_store(struct device *dev,
2443				  struct device_attribute *attr,
2444				  const char *buf, size_t count)
2445{
2446	int pkey;
2447	int ret;
2448
2449	if (sscanf(buf, "%i", &pkey) != 1)
2450		return -EINVAL;
2451
2452	if (pkey < 0 || pkey > 0xffff)
2453		return -EINVAL;
2454
2455	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
2456
2457	return ret ? ret : count;
2458
2459}
2460static DEVICE_ATTR_WO(delete_child);
2461
2462int ipoib_add_pkey_attr(struct net_device *dev)
2463{
2464	return device_create_file(&dev->dev, &dev_attr_pkey);
2465}
2466
2467/*
2468 * We erroneously exposed the iface's port number in the dev_id
2469 * sysfs field long after dev_port was introduced for that purpose[1],
2470 * and we need to stop everyone from relying on that.
2471 * Let's overload the show routine for the dev_id file here
2472 * to gently bring the issue up.
2473 *
2474 * [1] https://www.spinics.net/lists/netdev/msg272123.html
2475 */
2476static ssize_t dev_id_show(struct device *dev,
2477			   struct device_attribute *attr, char *buf)
2478{
2479	struct net_device *ndev = to_net_dev(dev);
2480
2481	/*
2482	 * ndev->dev_port will be equal to 0 in old kernels prior to commit
2483	 * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
2484	 * port numbers"). Zero was chosen as a special case so that user space
2485	 * applications can fall back and query dev_id to check whether it has a
2486	 * different value or not.
2487	 *
2488	 * Don't print warning in such scenario.
2489	 *
2490	 * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
2491	 */
2492	if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
2493		netdev_info_once(ndev,
2494			"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
2495			current->comm);
2496
2497	return sysfs_emit(buf, "%#x\n", ndev->dev_id);
2498}
2499static DEVICE_ATTR_RO(dev_id);
2500
2501static int ipoib_intercept_dev_id_attr(struct net_device *dev)
2502{
2503	device_remove_file(&dev->dev, &dev_attr_dev_id);
2504	return device_create_file(&dev->dev, &dev_attr_dev_id);
2505}
2506
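/*
 * Create and register one IPoIB interface for an IB port: allocate the
 * netdev, hook up the IB event handler, register with the network stack
 * and create the sysfs attributes (dev_id, mode, pkey, umcast,
 * create_child/delete_child).
 */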
2507static struct net_device *ipoib_add_port(const char *format,
2508					 struct ib_device *hca, u32 port)
2509{
2510	struct rtnl_link_ops *ops = ipoib_get_link_ops();
2511	struct rdma_netdev_alloc_params params;
2512	struct ipoib_dev_priv *priv;
2513	struct net_device *ndev;
2514	int result;
2515
2516	ndev = ipoib_intf_alloc(hca, port, format);
2517	if (IS_ERR(ndev)) {
2518		pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
2519			PTR_ERR(ndev));
2520		return ndev;
2521	}
2522	priv = ipoib_priv(ndev);
2523
2524	INIT_IB_EVENT_HANDLER(&priv->event_handler,
2525			      priv->ca, ipoib_event);
2526	ib_register_event_handler(&priv->event_handler);
2527
2528	/* call event handler to ensure pkey in sync */
2529	queue_work(ipoib_workqueue, &priv->flush_heavy);
2530
2531	ndev->rtnl_link_ops = ipoib_get_link_ops();
2532
2533	result = register_netdev(ndev);
2534	if (result) {
2535		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
2536			hca->name, port, result);
2537
2538		ipoib_parent_unregister_pre(ndev);
2539		ipoib_intf_free(ndev);
2540		free_netdev(ndev);
2541
2542		return ERR_PTR(result);
2543	}
2544
2545	if (hca->ops.rdma_netdev_get_params) {
2546		int rc = hca->ops.rdma_netdev_get_params(hca, port,
2547						     RDMA_NETDEV_IPOIB,
2548						     &params);
2549
2550		if (!rc && ops->priv_size < params.sizeof_priv)
2551			ops->priv_size = params.sizeof_priv;
2552	}
2553	/*
2554	 * We cannot set priv_destructor before register_netdev because we
2555	 * need priv to be always valid during the error flow to execute
2556	 * ipoib_parent_unregister_pre(). Instead handle it manually and only
2557	 * enter priv_destructor mode once we are completely registered.
2558	 */
2559	ndev->priv_destructor = ipoib_intf_free;
2560
2561	if (ipoib_intercept_dev_id_attr(ndev))
2562		goto sysfs_failed;
2563	if (ipoib_cm_add_mode_attr(ndev))
2564		goto sysfs_failed;
2565	if (ipoib_add_pkey_attr(ndev))
2566		goto sysfs_failed;
2567	if (ipoib_add_umcast_attr(ndev))
2568		goto sysfs_failed;
2569	if (device_create_file(&ndev->dev, &dev_attr_create_child))
2570		goto sysfs_failed;
2571	if (device_create_file(&ndev->dev, &dev_attr_delete_child))
2572		goto sysfs_failed;
2573
2574	return ndev;
2575
2576sysfs_failed:
2577	ipoib_parent_unregister_pre(ndev);
2578	unregister_netdev(ndev);
2579	return ERR_PTR(-ENOMEM);
2580}
2581
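/*
 * IB client "add" callback: create an interface for every port of the
 * device that speaks the IB protocol, and store the resulting list as
 * client data for ipoib_remove_one().
 */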
2582static int ipoib_add_one(struct ib_device *device)
2583{
2584	struct list_head *dev_list;
2585	struct net_device *dev;
2586	struct ipoib_dev_priv *priv;
2587	unsigned int p;
2588	int count = 0;
2589
2590	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
2591	if (!dev_list)
2592		return -ENOMEM;
2593
2594	INIT_LIST_HEAD(dev_list);
2595
2596	rdma_for_each_port (device, p) {
2597		if (!rdma_protocol_ib(device, p))
2598			continue;
2599		dev = ipoib_add_port("ib%d", device, p);
2600		if (!IS_ERR(dev)) {
2601			priv = ipoib_priv(dev);
2602			list_add_tail(&priv->list, dev_list);
2603			count++;
2604		}
2605	}
2606
2607	if (!count) {
2608		kfree(dev_list);
2609		return -EOPNOTSUPP;
2610	}
2611
2612	ib_set_client_data(device, &ipoib_client, dev_list);
2613	return 0;
2614}
2615
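/*
 * IB client "remove" callback: stop the event handler for each parent
 * interface, then unregister its children and the parent itself in one
 * batch under the rtnl lock.
 */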
2616static void ipoib_remove_one(struct ib_device *device, void *client_data)
2617{
2618	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2619	struct list_head *dev_list = client_data;
2620
2621	list_for_each_entry_safe(priv, tmp, dev_list, list) {
2622		LIST_HEAD(head);
2623		ipoib_parent_unregister_pre(priv->dev);
2624
2625		rtnl_lock();
2626
2627		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
2628					 list)
2629			unregister_netdevice_queue(cpriv->dev, &head);
2630		unregister_netdevice_queue(priv->dev, &head);
2631		unregister_netdevice_many(&head);
2632
2633		rtnl_unlock();
2634	}
2635
2636	kfree(dev_list);
2637}
2638
2639#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2640static struct notifier_block ipoib_netdev_notifier = {
2641	.notifier_call = ipoib_netdev_event,
2642};
2643#endif
2644
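/*
 * Module init: clamp the ring sizes to powers of two within the allowed
 * range, create the global flush workqueue and register the SA client,
 * the IB client and the netlink ops.
 */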
2645static int __init ipoib_init_module(void)
2646{
2647	int ret;
2648
2649	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
2650	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
2651	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
2652
2653	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
2654	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
2655	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2656#ifdef CONFIG_INFINIBAND_IPOIB_CM
2657	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2658	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2659#endif
2660
2661	/*
2662	 * When copying small received packets, we only copy from the
2663	 * linear data part of the SKB, so we rely on this condition.
2664	 */
2665	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
2666
2667	ipoib_register_debugfs();
2668
2669	/*
2670	 * We create a global workqueue here that is used for all flush
2671	 * operations.  However, if you attempt to flush a workqueue
2672	 * from a task on that same workqueue, it deadlocks the system.
2673	 * We want to be able to flush the tasks associated with a
2674	 * specific net device, so we also create a workqueue for each
2675	 * netdevice.  We queue up the tasks for that device only on
2676	 * its private workqueue, and we only queue up flush events
2677	 * on our global flush workqueue.  This avoids the deadlocks.
2678	 */
2679	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
2680	if (!ipoib_workqueue) {
2681		ret = -ENOMEM;
2682		goto err_fs;
2683	}
2684
2685	ib_sa_register_client(&ipoib_sa_client);
2686
2687	ret = ib_register_client(&ipoib_client);
2688	if (ret)
2689		goto err_sa;
2690
2691	ret = ipoib_netlink_init();
2692	if (ret)
2693		goto err_client;
2694
2695#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2696	register_netdevice_notifier(&ipoib_netdev_notifier);
2697#endif
2698	return 0;
2699
2700err_client:
2701	ib_unregister_client(&ipoib_client);
2702
2703err_sa:
2704	ib_sa_unregister_client(&ipoib_sa_client);
2705	destroy_workqueue(ipoib_workqueue);
2706
2707err_fs:
2708	ipoib_unregister_debugfs();
2709
2710	return ret;
2711}
2712
2713static void __exit ipoib_cleanup_module(void)
2714{
2715#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2716	unregister_netdevice_notifier(&ipoib_netdev_notifier);
2717#endif
2718	ipoib_netlink_fini();
2719	ib_unregister_client(&ipoib_client);
2720	ib_sa_unregister_client(&ipoib_sa_client);
2721	ipoib_unregister_debugfs();
2722	destroy_workqueue(ipoib_workqueue);
2723}
2724
2725module_init(ipoib_init_module);
2726module_exit(ipoib_cleanup_module);
  96static struct net_device *ipoib_get_net_dev_by_params(
  97		struct ib_device *dev, u8 port, u16 pkey,
  98		const union ib_gid *gid, const struct sockaddr *addr,
  99		void *client_data);
 100static int ipoib_set_mac(struct net_device *dev, void *addr);
 101static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
 102		       int cmd);
 103
 104static struct ib_client ipoib_client = {
 105	.name   = "ipoib",
 106	.add    = ipoib_add_one,
 107	.remove = ipoib_remove_one,
 108	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
 109};
 110
 111#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 112static int ipoib_netdev_event(struct notifier_block *this,
 113			      unsigned long event, void *ptr)
 114{
 115	struct netdev_notifier_info *ni = ptr;
 116	struct net_device *dev = ni->dev;
 117
 118	if (dev->netdev_ops->ndo_open != ipoib_open)
 119		return NOTIFY_DONE;
 120
 121	switch (event) {
 122	case NETDEV_REGISTER:
 123		ipoib_create_debug_files(dev);
 124		break;
 125	case NETDEV_CHANGENAME:
 126		ipoib_delete_debug_files(dev);
 127		ipoib_create_debug_files(dev);
 128		break;
 129	case NETDEV_UNREGISTER:
 130		ipoib_delete_debug_files(dev);
 131		break;
 132	}
 133
 134	return NOTIFY_DONE;
 135}
 136#endif
 137
 138int ipoib_open(struct net_device *dev)
 139{
 140	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 141
 142	ipoib_dbg(priv, "bringing up interface\n");
 143
 144	netif_carrier_off(dev);
 145
 146	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 147
 148	priv->sm_fullmember_sendonly_support = false;
 149
 150	if (ipoib_ib_dev_open(dev)) {
 151		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
 152			return 0;
 153		goto err_disable;
 154	}
 155
 156	ipoib_ib_dev_up(dev);
 157
 158	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 159		struct ipoib_dev_priv *cpriv;
 160
 161		/* Bring up any child interfaces too */
 162		down_read(&priv->vlan_rwsem);
 163		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 164			int flags;
 165
 166			flags = cpriv->dev->flags;
 167			if (flags & IFF_UP)
 168				continue;
 169
 170			dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
 171		}
 172		up_read(&priv->vlan_rwsem);
 
 
 
 
 
 
 173	}
 174
 175	netif_start_queue(dev);
 176
 177	return 0;
 178
 179err_disable:
 180	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 181
 182	return -EINVAL;
 183}
 184
 185static int ipoib_stop(struct net_device *dev)
 186{
 187	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 188
 189	ipoib_dbg(priv, "stopping interface\n");
 190
 191	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 192
 193	netif_stop_queue(dev);
 194
 195	ipoib_ib_dev_down(dev);
 196	ipoib_ib_dev_stop(dev);
 197
 198	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 199		struct ipoib_dev_priv *cpriv;
 200
 201		/* Bring down any child interfaces too */
 202		down_read(&priv->vlan_rwsem);
 203		list_for_each_entry(cpriv, &priv->child_intfs, list) {
 204			int flags;
 205
 206			flags = cpriv->dev->flags;
 207			if (!(flags & IFF_UP))
 208				continue;
 209
 210			dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
 211		}
 212		up_read(&priv->vlan_rwsem);
 213	}
 214
 215	return 0;
 216}
 217
 218static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 219{
 220	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 221
 222	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
 223		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 224
 225	return features;
 226}
 227
 228static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 229{
 230	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 231	int ret = 0;
 232
 233	/* dev->mtu > 2K ==> connected mode */
 234	if (ipoib_cm_admin_enabled(dev)) {
 235		if (new_mtu > ipoib_cm_max_mtu(dev))
 236			return -EINVAL;
 237
 238		if (new_mtu > priv->mcast_mtu)
 239			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
 240				   priv->mcast_mtu);
 241
 242		dev->mtu = new_mtu;
 243		return 0;
 244	}
 245
 246	if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
 247	    new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
 248		return -EINVAL;
 249
 250	priv->admin_mtu = new_mtu;
 251
 252	if (priv->mcast_mtu < priv->admin_mtu)
 253		ipoib_dbg(priv, "MTU must be smaller than the underlying "
 254				"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
 255
 256	new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
 257
 258	if (priv->rn_ops->ndo_change_mtu) {
 259		bool carrier_status = netif_carrier_ok(dev);
 260
 261		netif_carrier_off(dev);
 262
 263		/* notify lower level on the real mtu */
 264		ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
 265
 266		if (carrier_status)
 267			netif_carrier_on(dev);
 268	} else {
 269		dev->mtu = new_mtu;
 270	}
 271
 272	return ret;
 273}
 274
 275static void ipoib_get_stats(struct net_device *dev,
 276			    struct rtnl_link_stats64 *stats)
 277{
 278	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 279
 280	if (priv->rn_ops->ndo_get_stats64)
 281		priv->rn_ops->ndo_get_stats64(dev, stats);
 282	else
 283		netdev_stats_to_stats64(stats, &dev->stats);
 284}
 285
 286/* Called with an RCU read lock taken */
 287static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
 288					struct net_device *dev)
 289{
 290	struct net *net = dev_net(dev);
 291	struct in_device *in_dev;
 292	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
 293	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
 294	__be32 ret_addr;
 295
 296	switch (addr->sa_family) {
 297	case AF_INET:
 298		in_dev = in_dev_get(dev);
 299		if (!in_dev)
 300			return false;
 301
 302		ret_addr = inet_confirm_addr(net, in_dev, 0,
 303					     addr_in->sin_addr.s_addr,
 304					     RT_SCOPE_HOST);
 305		in_dev_put(in_dev);
 306		if (ret_addr)
 307			return true;
 308
 309		break;
 310	case AF_INET6:
 311		if (IS_ENABLED(CONFIG_IPV6) &&
 312		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
 313			return true;
 314
 315		break;
 316	}
 317	return false;
 318}
 319
 320/**
 321 * Find the master net_device on top of the given net_device.
 322 * @dev: base IPoIB net_device
 323 *
 324 * Returns the master net_device with a reference held, or the same net_device
 325 * if no master exists.
 326 */
 327static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
 328{
 329	struct net_device *master;
 330
 331	rcu_read_lock();
 332	master = netdev_master_upper_dev_get_rcu(dev);
 333	if (master)
 334		dev_hold(master);
 335	rcu_read_unlock();
 336
 337	if (master)
 338		return master;
 339
 340	dev_hold(dev);
 341	return dev;
 342}
 343
 344struct ipoib_walk_data {
 345	const struct sockaddr *addr;
 346	struct net_device *result;
 347};
 348
 349static int ipoib_upper_walk(struct net_device *upper, void *_data)
 
 350{
 351	struct ipoib_walk_data *data = _data;
 352	int ret = 0;
 353
 354	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
 355		dev_hold(upper);
 356		data->result = upper;
 357		ret = 1;
 358	}
 359
 360	return ret;
 361}
 362
 363/**
 364 * Find a net_device matching the given address, which is an upper device of
 365 * the given net_device.
 
 366 * @addr: IP address to look for.
 367 * @dev: base IPoIB net_device
 368 *
 369 * If found, returns the net_device with a reference held. Otherwise return
 370 * NULL.
 371 */
 372static struct net_device *ipoib_get_net_dev_match_addr(
 373		const struct sockaddr *addr, struct net_device *dev)
 374{
 
 375	struct ipoib_walk_data data = {
 376		.addr = addr,
 377	};
 378
 
 379	rcu_read_lock();
 380	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
 381		dev_hold(dev);
 382		data.result = dev;
 383		goto out;
 384	}
 385
 386	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
 387out:
 388	rcu_read_unlock();
 389	return data.result;
 390}
 391
 392/* returns the number of IPoIB netdevs on top a given ipoib device matching a
 393 * pkey_index and address, if one exists.
 394 *
 395 * @found_net_dev: contains a matching net_device if the return value >= 1,
 396 * with a reference held. */
 397static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
 398				     const union ib_gid *gid,
 399				     u16 pkey_index,
 400				     const struct sockaddr *addr,
 401				     int nesting,
 402				     struct net_device **found_net_dev)
 403{
 404	struct ipoib_dev_priv *child_priv;
 405	struct net_device *net_dev = NULL;
 406	int matches = 0;
 407
 408	if (priv->pkey_index == pkey_index &&
 409	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
 410		if (!addr) {
 411			net_dev = ipoib_get_master_net_dev(priv->dev);
 412		} else {
 413			/* Verify the net_device matches the IP address, as
 414			 * IPoIB child devices currently share a GID. */
 415			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
 416		}
 417		if (net_dev) {
 418			if (!*found_net_dev)
 419				*found_net_dev = net_dev;
 420			else
 421				dev_put(net_dev);
 422			++matches;
 423		}
 424	}
 425
 426	/* Check child interfaces */
 427	down_read_nested(&priv->vlan_rwsem, nesting);
 428	list_for_each_entry(child_priv, &priv->child_intfs, list) {
 429		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
 430						    pkey_index, addr,
 431						    nesting + 1,
 432						    found_net_dev);
 433		if (matches > 1)
 434			break;
 435	}
 436	up_read(&priv->vlan_rwsem);
 437
 438	return matches;
 439}
 440
 441/* Returns the number of matching net_devs found (between 0 and 2). Also
 442 * return the matching net_device in the @net_dev parameter, holding a
 443 * reference to the net_device, if the number of matches >= 1 */
 444static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
 445					 u16 pkey_index,
 446					 const union ib_gid *gid,
 447					 const struct sockaddr *addr,
 448					 struct net_device **net_dev)
 449{
 450	struct ipoib_dev_priv *priv;
 451	int matches = 0;
 452
 453	*net_dev = NULL;
 454
 455	list_for_each_entry(priv, dev_list, list) {
 456		if (priv->port != port)
 457			continue;
 458
 459		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
 460						     addr, 0, net_dev);
 461		if (matches > 1)
 462			break;
 463	}
 464
 465	return matches;
 466}
 467
 468static struct net_device *ipoib_get_net_dev_by_params(
 469		struct ib_device *dev, u8 port, u16 pkey,
 470		const union ib_gid *gid, const struct sockaddr *addr,
 471		void *client_data)
 472{
 473	struct net_device *net_dev;
 474	struct list_head *dev_list = client_data;
 475	u16 pkey_index;
 476	int matches;
 477	int ret;
 478
 479	if (!rdma_protocol_ib(dev, port))
 480		return NULL;
 481
 482	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
 483	if (ret)
 484		return NULL;
 485
 486	if (!dev_list)
 487		return NULL;
 488
 489	/* See if we can find a unique device matching the L2 parameters */
 490	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
 491						gid, NULL, &net_dev);
 492
 493	switch (matches) {
 494	case 0:
 495		return NULL;
 496	case 1:
 497		return net_dev;
 498	}
 499
 500	dev_put(net_dev);
 501
 502	/* Couldn't find a unique device with L2 parameters only. Use L3
 503	 * address to uniquely match the net device */
 504	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
 505						gid, addr, &net_dev);
 506	switch (matches) {
 507	case 0:
 508		return NULL;
 509	default:
 510		dev_warn_ratelimited(&dev->dev,
 511				     "duplicate IP address detected\n");
 512		/* Fall through */
 513	case 1:
 514		return net_dev;
 515	}
 516}
 517
 518int ipoib_set_mode(struct net_device *dev, const char *buf)
 519{
 520	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 521
 522	if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
 523	     !strcmp(buf, "connected\n")) ||
 524	     (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
 525	     !strcmp(buf, "datagram\n"))) {
 526		return 0;
 527	}
 528
 529	/* flush paths if we switch modes so that connections are restarted */
 530	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
 531		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 532		ipoib_warn(priv, "enabling connected mode "
 533			   "will cause multicast packet drops\n");
 534		netdev_update_features(dev);
 535		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
 
 536		rtnl_unlock();
 537		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
 538
 539		ipoib_flush_paths(dev);
 540		return (!rtnl_trylock()) ? -EBUSY : 0;
 541	}
 542
 543	if (!strcmp(buf, "datagram\n")) {
 544		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
 545		netdev_update_features(dev);
 546		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
 
 547		rtnl_unlock();
 548		ipoib_flush_paths(dev);
 549		return (!rtnl_trylock()) ? -EBUSY : 0;
 550	}
 551
 552	return -EINVAL;
 553}
 554
 555struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 556{
 557	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 558	struct rb_node *n = priv->path_tree.rb_node;
 559	struct ipoib_path *path;
 560	int ret;
 561
 562	while (n) {
 563		path = rb_entry(n, struct ipoib_path, rb_node);
 564
 565		ret = memcmp(gid, path->pathrec.dgid.raw,
 566			     sizeof (union ib_gid));
 567
 568		if (ret < 0)
 569			n = n->rb_left;
 570		else if (ret > 0)
 571			n = n->rb_right;
 572		else
 573			return path;
 574	}
 575
 576	return NULL;
 577}
 578
 579static int __path_add(struct net_device *dev, struct ipoib_path *path)
 580{
 581	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 582	struct rb_node **n = &priv->path_tree.rb_node;
 583	struct rb_node *pn = NULL;
 584	struct ipoib_path *tpath;
 585	int ret;
 586
 587	while (*n) {
 588		pn = *n;
 589		tpath = rb_entry(pn, struct ipoib_path, rb_node);
 590
 591		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
 592			     sizeof (union ib_gid));
 593		if (ret < 0)
 594			n = &pn->rb_left;
 595		else if (ret > 0)
 596			n = &pn->rb_right;
 597		else
 598			return -EEXIST;
 599	}
 600
 601	rb_link_node(&path->rb_node, pn, n);
 602	rb_insert_color(&path->rb_node, &priv->path_tree);
 603
 604	list_add_tail(&path->list, &priv->path_list);
 605
 606	return 0;
 607}
 608
 609static void path_free(struct net_device *dev, struct ipoib_path *path)
 610{
 611	struct sk_buff *skb;
 612
 613	while ((skb = __skb_dequeue(&path->queue)))
 614		dev_kfree_skb_irq(skb);
 615
 616	ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
 617
 618	/* remove all neigh connected to this path */
 619	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
 620
 621	if (path->ah)
 622		ipoib_put_ah(path->ah);
 623
 624	kfree(path);
 625}
 626
 627#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 628
 629struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
 630{
 631	struct ipoib_path_iter *iter;
 632
 633	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 634	if (!iter)
 635		return NULL;
 636
 637	iter->dev = dev;
 638	memset(iter->path.pathrec.dgid.raw, 0, 16);
 639
 640	if (ipoib_path_iter_next(iter)) {
 641		kfree(iter);
 642		return NULL;
 643	}
 644
 645	return iter;
 646}
 647
 648int ipoib_path_iter_next(struct ipoib_path_iter *iter)
 649{
 650	struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
 651	struct rb_node *n;
 652	struct ipoib_path *path;
 653	int ret = 1;
 654
 655	spin_lock_irq(&priv->lock);
 656
 657	n = rb_first(&priv->path_tree);
 658
 659	while (n) {
 660		path = rb_entry(n, struct ipoib_path, rb_node);
 661
 662		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
 663			   sizeof (union ib_gid)) < 0) {
 664			iter->path = *path;
 665			ret = 0;
 666			break;
 667		}
 668
 669		n = rb_next(n);
 670	}
 671
 672	spin_unlock_irq(&priv->lock);
 673
 674	return ret;
 675}
 676
 677void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 678			  struct ipoib_path *path)
 679{
 680	*path = iter->path;
 681}
 682
 683#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
 684
 685void ipoib_mark_paths_invalid(struct net_device *dev)
 686{
 687	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 688	struct ipoib_path *path, *tp;
 689
 690	spin_lock_irq(&priv->lock);
 691
 692	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
 693		ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
 694			  be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
 695			  path->pathrec.dgid.raw);
 696		if (path->ah)
 697			path->ah->valid = 0;
 698	}
 699
 700	spin_unlock_irq(&priv->lock);
 701}
 702
 703static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
 704{
 705	struct ipoib_pseudo_header *phdr;
 706
 707	phdr = skb_push(skb, sizeof(*phdr));
 708	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
 709}
 710
 711void ipoib_flush_paths(struct net_device *dev)
 712{
 713	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 714	struct ipoib_path *path, *tp;
 715	LIST_HEAD(remove_list);
 716	unsigned long flags;
 717
 718	netif_tx_lock_bh(dev);
 719	spin_lock_irqsave(&priv->lock, flags);
 720
 721	list_splice_init(&priv->path_list, &remove_list);
 722
 723	list_for_each_entry(path, &remove_list, list)
 724		rb_erase(&path->rb_node, &priv->path_tree);
 725
 726	list_for_each_entry_safe(path, tp, &remove_list, list) {
 727		if (path->query)
 728			ib_sa_cancel_query(path->query_id, path->query);
 729		spin_unlock_irqrestore(&priv->lock, flags);
 730		netif_tx_unlock_bh(dev);
 731		wait_for_completion(&path->done);
 732		path_free(dev, path);
 733		netif_tx_lock_bh(dev);
 734		spin_lock_irqsave(&priv->lock, flags);
 735	}
 736
 737	spin_unlock_irqrestore(&priv->lock, flags);
 738	netif_tx_unlock_bh(dev);
 739}
 740
 741static void path_rec_completion(int status,
 742				struct sa_path_rec *pathrec,
 743				void *path_ptr)
 744{
 745	struct ipoib_path *path = path_ptr;
 746	struct net_device *dev = path->dev;
 747	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 748	struct ipoib_ah *ah = NULL;
 749	struct ipoib_ah *old_ah = NULL;
 750	struct ipoib_neigh *neigh, *tn;
 751	struct sk_buff_head skqueue;
 752	struct sk_buff *skb;
 753	unsigned long flags;
 754
 755	if (!status)
 756		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
 757			  be32_to_cpu(sa_path_get_dlid(pathrec)),
 758			  pathrec->dgid.raw);
 759	else
 760		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
 761			  status, path->pathrec.dgid.raw);
 762
 763	skb_queue_head_init(&skqueue);
 764
 765	if (!status) {
 766		struct rdma_ah_attr av;
 767
 768		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
 769					       pathrec, &av, NULL)) {
 770			ah = ipoib_create_ah(dev, priv->pd, &av);
 771			rdma_destroy_ah_attr(&av);
 772		}
 773	}
 774
 775	spin_lock_irqsave(&priv->lock, flags);
 776
 777	if (!IS_ERR_OR_NULL(ah)) {
 778		/*
 779		 * pathrec.dgid is used as the database key from the LLADDR,
 780		 * it must remain unchanged even if the SA returns a different
 781		 * GID to use in the AH.
 782		 */
 783		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
 784			   sizeof(union ib_gid))) {
 785			ipoib_dbg(
 786				priv,
 787				"%s got PathRec for gid %pI6 while asked for %pI6\n",
 788				dev->name, pathrec->dgid.raw,
 789				path->pathrec.dgid.raw);
 790			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
 791			       sizeof(union ib_gid));
 792		}
 793
 794		path->pathrec = *pathrec;
 795
 796		old_ah   = path->ah;
 797		path->ah = ah;
 798
 799		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
 800			  ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
 801			  pathrec->sl);
 802
 803		while ((skb = __skb_dequeue(&path->queue)))
 804			__skb_queue_tail(&skqueue, skb);
 805
 806		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
 807			if (neigh->ah) {
 808				WARN_ON(neigh->ah != old_ah);
 809				/*
 810				 * Dropping the ah reference inside
 811				 * priv->lock is safe here, because we
 812				 * will hold one more reference from
 813				 * the original value of path->ah (ie
 814				 * old_ah).
 815				 */
 816				ipoib_put_ah(neigh->ah);
 817			}
 818			kref_get(&path->ah->ref);
 819			neigh->ah = path->ah;
 820
 821			if (ipoib_cm_enabled(dev, neigh->daddr)) {
 822				if (!ipoib_cm_get(neigh))
 823					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
 824									       path,
 825									       neigh));
 826				if (!ipoib_cm_get(neigh)) {
 827					ipoib_neigh_free(neigh);
 828					continue;
 829				}
 830			}
 831
 832			while ((skb = __skb_dequeue(&neigh->queue)))
 833				__skb_queue_tail(&skqueue, skb);
 834		}
 835		path->ah->valid = 1;
 836	}
 837
 838	path->query = NULL;
 839	complete(&path->done);
 840
 841	spin_unlock_irqrestore(&priv->lock, flags);
 842
 843	if (IS_ERR_OR_NULL(ah))
 844		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
 845
 846	if (old_ah)
 847		ipoib_put_ah(old_ah);
 848
 849	while ((skb = __skb_dequeue(&skqueue))) {
 850		int ret;
 851		skb->dev = dev;
 852		ret = dev_queue_xmit(skb);
 853		if (ret)
 854			ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
 855				   __func__, ret);
 856	}
 857}
 858
 859static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
 860			  void *gid)
 861{
 862	path->dev = priv->dev;
 863
 864	if (rdma_cap_opa_ah(priv->ca, priv->port))
 865		path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
 866	else
 867		path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
 868
 869	memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
 870	path->pathrec.sgid	    = priv->local_gid;
 871	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
 872	path->pathrec.numb_path     = 1;
 873	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
 874}
 875
 876static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
 877{
 878	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 879	struct ipoib_path *path;
 880
 881	if (!priv->broadcast)
 882		return NULL;
 883
 884	path = kzalloc(sizeof(*path), GFP_ATOMIC);
 885	if (!path)
 886		return NULL;
 887
 888	skb_queue_head_init(&path->queue);
 889
 890	INIT_LIST_HEAD(&path->neigh_list);
 891
 892	init_path_rec(priv, path, gid);
 893
 894	return path;
 895}
 896
 897static int path_rec_start(struct net_device *dev,
 898			  struct ipoib_path *path)
 899{
 900	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 901
 902	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
 903		  path->pathrec.dgid.raw);
 904
 905	init_completion(&path->done);
 906
 907	path->query_id =
 908		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
 909				   &path->pathrec,
 910				   IB_SA_PATH_REC_DGID		|
 911				   IB_SA_PATH_REC_SGID		|
 912				   IB_SA_PATH_REC_NUMB_PATH	|
 913				   IB_SA_PATH_REC_TRAFFIC_CLASS |
 914				   IB_SA_PATH_REC_PKEY,
 915				   1000, GFP_ATOMIC,
 916				   path_rec_completion,
 917				   path, &path->query);
 918	if (path->query_id < 0) {
 919		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
 920		path->query = NULL;
 921		complete(&path->done);
 922		return path->query_id;
 923	}
 924
 925	return 0;
 926}
 927
 928static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
 929			       struct net_device *dev)
 930{
 931	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 932	struct ipoib_path *path;
 933	unsigned long flags;
 934
 935	spin_lock_irqsave(&priv->lock, flags);
 936
 937	path = __path_find(dev, daddr + 4);
 938	if (!path)
 939		goto out;
 940	if (!path->query)
 941		path_rec_start(dev, path);
 942out:
 943	spin_unlock_irqrestore(&priv->lock, flags);
 944}
 945
 946static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
 947					  struct net_device *dev)
 948{
 949	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 950	struct rdma_netdev *rn = netdev_priv(dev);
 951	struct ipoib_path *path;
 952	struct ipoib_neigh *neigh;
 953	unsigned long flags;
 954
 955	spin_lock_irqsave(&priv->lock, flags);
 956	neigh = ipoib_neigh_alloc(daddr, dev);
 957	if (!neigh) {
 958		spin_unlock_irqrestore(&priv->lock, flags);
 959		++dev->stats.tx_dropped;
 960		dev_kfree_skb_any(skb);
 961		return NULL;
 962	}
 963
 964	/* To avoid race condition, make sure that the
 965	 * neigh will be added only once.
 966	 */
 967	if (unlikely(!list_empty(&neigh->list))) {
 968		spin_unlock_irqrestore(&priv->lock, flags);
 969		return neigh;
 970	}
 971
 972	path = __path_find(dev, daddr + 4);
 973	if (!path) {
 974		path = path_rec_create(dev, daddr + 4);
 975		if (!path)
 976			goto err_path;
 977
 978		__path_add(dev, path);
 979	}
 980
 981	list_add_tail(&neigh->list, &path->neigh_list);
 982
 983	if (path->ah && path->ah->valid) {
 984		kref_get(&path->ah->ref);
 985		neigh->ah = path->ah;
 986
 987		if (ipoib_cm_enabled(dev, neigh->daddr)) {
 988			if (!ipoib_cm_get(neigh))
 989				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
 990			if (!ipoib_cm_get(neigh)) {
 991				ipoib_neigh_free(neigh);
 992				goto err_drop;
 993			}
 994			if (skb_queue_len(&neigh->queue) <
 995			    IPOIB_MAX_PATH_REC_QUEUE) {
 996				push_pseudo_header(skb, neigh->daddr);
 997				__skb_queue_tail(&neigh->queue, skb);
 998			} else {
 999				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
1000					   skb_queue_len(&neigh->queue));
1001				goto err_drop;
1002			}
1003		} else {
1004			spin_unlock_irqrestore(&priv->lock, flags);
1005			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1006						       IPOIB_QPN(daddr));
1007			ipoib_neigh_put(neigh);
1008			return NULL;
1009		}
1010	} else {
1011		neigh->ah  = NULL;
1012
1013		if (!path->query && path_rec_start(dev, path))
1014			goto err_path;
1015		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1016			push_pseudo_header(skb, neigh->daddr);
1017			__skb_queue_tail(&neigh->queue, skb);
1018		} else {
1019			goto err_drop;
1020		}
1021	}
1022
1023	spin_unlock_irqrestore(&priv->lock, flags);
1024	ipoib_neigh_put(neigh);
1025	return NULL;
1026
1027err_path:
1028	ipoib_neigh_free(neigh);
1029err_drop:
1030	++dev->stats.tx_dropped;
1031	dev_kfree_skb_any(skb);
1032
1033	spin_unlock_irqrestore(&priv->lock, flags);
1034	ipoib_neigh_put(neigh);
1035
1036	return NULL;
1037}
1038
1039static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
1040			     struct ipoib_pseudo_header *phdr)
1041{
1042	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1043	struct rdma_netdev *rn = netdev_priv(dev);
1044	struct ipoib_path *path;
1045	unsigned long flags;
1046
1047	spin_lock_irqsave(&priv->lock, flags);
1048
1049	/* no broadcast means that all paths are (going to be) not valid */
1050	if (!priv->broadcast)
1051		goto drop_and_unlock;
1052
1053	path = __path_find(dev, phdr->hwaddr + 4);
1054	if (!path || !path->ah || !path->ah->valid) {
1055		if (!path) {
1056			path = path_rec_create(dev, phdr->hwaddr + 4);
1057			if (!path)
1058				goto drop_and_unlock;
1059			__path_add(dev, path);
1060		} else {
1061			/*
1062			 * make sure there are no changes in the existing
1063			 * path record
1064			 */
1065			init_path_rec(priv, path, phdr->hwaddr + 4);
1066		}
1067		if (!path->query && path_rec_start(dev, path)) {
1068			goto drop_and_unlock;
1069		}
1070
1071		if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1072			push_pseudo_header(skb, phdr->hwaddr);
1073			__skb_queue_tail(&path->queue, skb);
1074			goto unlock;
1075		} else {
1076			goto drop_and_unlock;
1077		}
1078	}
1079
1080	spin_unlock_irqrestore(&priv->lock, flags);
1081	ipoib_dbg(priv, "Send unicast ARP to %08x\n",
1082		  be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
1083	path->ah->last_send = rn->send(dev, skb, path->ah->ah,
1084				       IPOIB_QPN(phdr->hwaddr));
1085	return;
1086
1087drop_and_unlock:
1088	++dev->stats.tx_dropped;
1089	dev_kfree_skb_any(skb);
1090unlock:
1091	spin_unlock_irqrestore(&priv->lock, flags);
1092}
1093
1094static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1095{
1096	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1097	struct rdma_netdev *rn = netdev_priv(dev);
1098	struct ipoib_neigh *neigh;
1099	struct ipoib_pseudo_header *phdr;
1100	struct ipoib_header *header;
1101	unsigned long flags;
1102
1103	phdr = (struct ipoib_pseudo_header *) skb->data;
1104	skb_pull(skb, sizeof(*phdr));
1105	header = (struct ipoib_header *) skb->data;
1106
1107	if (unlikely(phdr->hwaddr[4] == 0xff)) {
1108		/* multicast, arrange "if" according to probability */
1109		if ((header->proto != htons(ETH_P_IP)) &&
1110		    (header->proto != htons(ETH_P_IPV6)) &&
1111		    (header->proto != htons(ETH_P_ARP)) &&
1112		    (header->proto != htons(ETH_P_RARP)) &&
1113		    (header->proto != htons(ETH_P_TIPC))) {
1114			/* ethertype not supported by IPoIB */
1115			++dev->stats.tx_dropped;
1116			dev_kfree_skb_any(skb);
1117			return NETDEV_TX_OK;
1118		}
1119		/* Add in the P_Key for multicast*/
1120		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
1121		phdr->hwaddr[9] = priv->pkey & 0xff;
1122
1123		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1124		if (likely(neigh))
1125			goto send_using_neigh;
1126		ipoib_mcast_send(dev, phdr->hwaddr, skb);
1127		return NETDEV_TX_OK;
1128	}
1129
1130	/* unicast, arrange "switch" according to probability */
1131	switch (header->proto) {
1132	case htons(ETH_P_IP):
1133	case htons(ETH_P_IPV6):
1134	case htons(ETH_P_TIPC):
1135		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
1136		if (unlikely(!neigh)) {
1137			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
1138			if (likely(!neigh))
1139				return NETDEV_TX_OK;
1140		}
1141		break;
1142	case htons(ETH_P_ARP):
1143	case htons(ETH_P_RARP):
1144		/* for unicast ARP and RARP should always perform path find */
1145		unicast_arp_send(skb, dev, phdr);
1146		return NETDEV_TX_OK;
1147	default:
1148		/* ethertype not supported by IPoIB */
1149		++dev->stats.tx_dropped;
1150		dev_kfree_skb_any(skb);
1151		return NETDEV_TX_OK;
1152	}
1153
1154send_using_neigh:
1155	/* note we now hold a ref to neigh */
1156	if (ipoib_cm_get(neigh)) {
1157		if (ipoib_cm_up(neigh)) {
1158			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1159			goto unref;
1160		}
1161	} else if (neigh->ah && neigh->ah->valid) {
1162		neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
1163						IPOIB_QPN(phdr->hwaddr));
1164		goto unref;
1165	} else if (neigh->ah) {
1166		neigh_refresh_path(neigh, phdr->hwaddr, dev);
1167	}
1168
1169	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1170		push_pseudo_header(skb, phdr->hwaddr);
1171		spin_lock_irqsave(&priv->lock, flags);
1172		__skb_queue_tail(&neigh->queue, skb);
1173		spin_unlock_irqrestore(&priv->lock, flags);
1174	} else {
1175		++dev->stats.tx_dropped;
1176		dev_kfree_skb_any(skb);
1177	}
1178
1179unref:
1180	ipoib_neigh_put(neigh);
1181
1182	return NETDEV_TX_OK;
1183}
1184
1185static void ipoib_timeout(struct net_device *dev)
1186{
1187	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1188
1189	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
1190		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
1191	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
1192		   netif_queue_stopped(dev),
1193		   priv->tx_head, priv->tx_tail);
1194	/* XXX reset QP, etc. */
1195}
1196
1197static int ipoib_hard_header(struct sk_buff *skb,
1198			     struct net_device *dev,
1199			     unsigned short type,
1200			     const void *daddr,
1201			     const void *saddr,
1202			     unsigned int len)
1203{
1204	struct ipoib_header *header;
1205
1206	header = skb_push(skb, sizeof(*header));
1207
1208	header->proto = htons(type);
1209	header->reserved = 0;
1210
1211	/*
1212	 * We don't rely on the dst_entry structure; always stuff the
1213	 * destination address into the skb hard header so we can figure out
1214	 * where to send the packet later.
1215	 */
1216	push_pseudo_header(skb, daddr);
1217
1218	return IPOIB_HARD_LEN;
1219}
1220
1221static void ipoib_set_mcast_list(struct net_device *dev)
1222{
1223	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1224
1225	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
1226		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
1227		return;
1228	}
1229
1230	queue_work(priv->wq, &priv->restart_task);
1231}
1232
1233static int ipoib_get_iflink(const struct net_device *dev)
1234{
1235	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1236
1237	/* parent interface */
1238	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
1239		return dev->ifindex;
1240
1241	/* child/vlan interface */
1242	return priv->parent->ifindex;
1243}
1244
1245static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
1246{
1247	/*
1248	 * Use only the address parts that contribute to spreading.
1249	 * The subnet prefix is not used, as one cannot connect to the
1250	 * same remote port (GUID) using the same remote QPN via two
1251	 * different subnets.
1252	 */
1253	 /* qpn octets[1:4) & port GUID octets[12:20) */
1254	u32 *d32 = (u32 *) daddr;
1255	u32 hv;
1256
1257	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
1258	return hv & htbl->mask;
1259}
1260
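/*
 * Look up a neighbour by its IPoIB hardware address under RCU. On success
 * a reference is taken on behalf of the caller, who must release it with
 * ipoib_neigh_put().
 */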
1261struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
1262{
1263	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1264	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1265	struct ipoib_neigh_hash *htbl;
1266	struct ipoib_neigh *neigh = NULL;
1267	u32 hash_val;
1268
1269	rcu_read_lock_bh();
1270
1271	htbl = rcu_dereference_bh(ntbl->htbl);
1272
1273	if (!htbl)
1274		goto out_unlock;
1275
1276	hash_val = ipoib_addr_hash(htbl, daddr);
1277	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
1278	     neigh != NULL;
1279	     neigh = rcu_dereference_bh(neigh->hnext)) {
1280		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1281			/* found, take one ref on behalf of the caller */
1282			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1283				/* deleted */
1284				neigh = NULL;
1285				goto out_unlock;
1286			}
1287
1288			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
1289				neigh->alive = jiffies;
1290			goto out_unlock;
1291		}
1292	}
1293
1294out_unlock:
1295	rcu_read_unlock_bh();
1296	return neigh;
1297}
1298
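/*
 * Garbage-collect the neighbour hash table: walk every bucket under
 * priv->lock and unlink entries that have been idle for two ARP GC
 * intervals, deferring the actual free to ipoib_neigh_reclaim() via RCU.
 */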
1299static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
1300{
1301	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1302	struct ipoib_neigh_hash *htbl;
1303	unsigned long neigh_obsolete;
1304	unsigned long dt;
1305	unsigned long flags;
1306	int i;
1307	LIST_HEAD(remove_list);
1308
1309	spin_lock_irqsave(&priv->lock, flags);
1310
1311	htbl = rcu_dereference_protected(ntbl->htbl,
1312					 lockdep_is_held(&priv->lock));
1313
1314	if (!htbl)
1315		goto out_unlock;
1316
1317	/* neigh is obsolete if it was idle for two GC periods */
1318	dt = 2 * arp_tbl.gc_interval;
1319	neigh_obsolete = jiffies - dt;
1320
1321	for (i = 0; i < htbl->size; i++) {
1322		struct ipoib_neigh *neigh;
1323		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1324
1325		while ((neigh = rcu_dereference_protected(*np,
1326							  lockdep_is_held(&priv->lock))) != NULL) {
1327			/* was the neigh idle for two GC periods */
1328			if (time_after(neigh_obsolete, neigh->alive)) {
1329
1330				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
1331
1332				rcu_assign_pointer(*np,
1333						   rcu_dereference_protected(neigh->hnext,
1334									     lockdep_is_held(&priv->lock)));
1335				/* remove from path/mc list */
1336				list_del_init(&neigh->list);
1337				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1338			} else {
1339				np = &neigh->hnext;
1340			}
1341
1342		}
1343	}
1344
1345out_unlock:
1346	spin_unlock_irqrestore(&priv->lock, flags);
1347	ipoib_mcast_remove_list(&remove_list);
1348}
1349
1350static void ipoib_reap_neigh(struct work_struct *work)
1351{
1352	struct ipoib_dev_priv *priv =
1353		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
1354
1355	__ipoib_reap_neigh(priv);
1356
1357	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1358			   arp_tbl.gc_interval);
1359}
1360
1361
1362static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
1363				      struct net_device *dev)
1364{
1365	struct ipoib_neigh *neigh;
1366
1367	neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
1368	if (!neigh)
1369		return NULL;
1370
1371	neigh->dev = dev;
1372	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
1373	skb_queue_head_init(&neigh->queue);
1374	INIT_LIST_HEAD(&neigh->list);
1375	ipoib_cm_set(neigh, NULL);
1376	/* one ref on behalf of the caller */
1377	atomic_set(&neigh->refcnt, 1);
1378
1379	return neigh;
1380}
1381
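/*
 * Find or create a neighbour entry for daddr. The caller must hold
 * priv->lock (see the lockdep_is_held() annotations); either an existing
 * entry is returned with an extra reference, or a new one is constructed
 * and linked into the hash table.
 */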
1382struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
1383				      struct net_device *dev)
1384{
1385	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1386	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1387	struct ipoib_neigh_hash *htbl;
1388	struct ipoib_neigh *neigh;
1389	u32 hash_val;
1390
1391	htbl = rcu_dereference_protected(ntbl->htbl,
1392					 lockdep_is_held(&priv->lock));
1393	if (!htbl) {
1394		neigh = NULL;
1395		goto out_unlock;
1396	}
1397
1398	/* need to add a new neigh, but maybe some other thread succeeded?
1399	 * recalculate the hash (a resize may have taken place) and search again
1400	 */
1401	hash_val = ipoib_addr_hash(htbl, daddr);
1402	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
1403					       lockdep_is_held(&priv->lock));
1404	     neigh != NULL;
1405	     neigh = rcu_dereference_protected(neigh->hnext,
1406					       lockdep_is_held(&priv->lock))) {
1407		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
1408			/* found, take one ref on behalf of the caller */
1409			if (!atomic_inc_not_zero(&neigh->refcnt)) {
1410				/* deleted */
1411				neigh = NULL;
1412				break;
1413			}
1414			neigh->alive = jiffies;
1415			goto out_unlock;
1416		}
1417	}
1418
1419	neigh = ipoib_neigh_ctor(daddr, dev);
1420	if (!neigh)
1421		goto out_unlock;
1422
1423	/* one ref on behalf of the hash table */
1424	atomic_inc(&neigh->refcnt);
1425	neigh->alive = jiffies;
1426	/* put in hash */
1427	rcu_assign_pointer(neigh->hnext,
1428			   rcu_dereference_protected(htbl->buckets[hash_val],
1429						     lockdep_is_held(&priv->lock)));
1430	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
1431	atomic_inc(&ntbl->entries);
1432
1433out_unlock:
1434
1435	return neigh;
1436}
1437
1438void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
1439{
1440	/* neigh reference count was dropped to zero */
1441	struct net_device *dev = neigh->dev;
1442	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1443	struct sk_buff *skb;
1444	if (neigh->ah)
1445		ipoib_put_ah(neigh->ah);
1446	while ((skb = __skb_dequeue(&neigh->queue))) {
1447		++dev->stats.tx_dropped;
1448		dev_kfree_skb_any(skb);
1449	}
1450	if (ipoib_cm_get(neigh))
1451		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
1452	ipoib_dbg(ipoib_priv(dev),
1453		  "neigh free for %06x %pI6\n",
1454		  IPOIB_QPN(neigh->daddr),
1455		  neigh->daddr + 4);
1456	kfree(neigh);
1457	if (atomic_dec_and_test(&priv->ntbl.entries)) {
1458		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
1459			complete(&priv->ntbl.flushed);
1460	}
1461}
1462
1463static void ipoib_neigh_reclaim(struct rcu_head *rp)
1464{
1465	/* Called as a result of removal from hash table */
1466	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
1467	/* note TX context may hold another ref */
1468	ipoib_neigh_put(neigh);
1469}
1470
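/*
 * Unlink a neighbour from its hash bucket and parent list; the memory is
 * released later through call_rcu() -> ipoib_neigh_reclaim(), which drops
 * the reference held by the hash table.
 */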
1471void ipoib_neigh_free(struct ipoib_neigh *neigh)
1472{
1473	struct net_device *dev = neigh->dev;
1474	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1475	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1476	struct ipoib_neigh_hash *htbl;
1477	struct ipoib_neigh __rcu **np;
1478	struct ipoib_neigh *n;
1479	u32 hash_val;
1480
1481	htbl = rcu_dereference_protected(ntbl->htbl,
1482					lockdep_is_held(&priv->lock));
1483	if (!htbl)
1484		return;
1485
1486	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
1487	np = &htbl->buckets[hash_val];
1488	for (n = rcu_dereference_protected(*np,
1489					    lockdep_is_held(&priv->lock));
1490	     n != NULL;
1491	     n = rcu_dereference_protected(*np,
1492					lockdep_is_held(&priv->lock))) {
1493		if (n == neigh) {
1494			/* found */
1495			rcu_assign_pointer(*np,
1496					   rcu_dereference_protected(neigh->hnext,
1497								     lockdep_is_held(&priv->lock)));
1498			/* remove from parent list */
1499			list_del_init(&neigh->list);
1500			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1501			return;
1502		} else {
1503			np = &n->hnext;
1504		}
1505	}
1506}
1507
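/*
 * Allocate the neighbour hash table, sized to arp_tbl.gc_thresh3 rounded
 * up to a power of two, and start the periodic reaping work.
 */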
1508static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
1509{
1510	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1511	struct ipoib_neigh_hash *htbl;
1512	struct ipoib_neigh __rcu **buckets;
1513	u32 size;
1514
1515	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1516	ntbl->htbl = NULL;
1517	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
1518	if (!htbl)
1519		return -ENOMEM;
1520	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
1521	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
1522	if (!buckets) {
1523		kfree(htbl);
1524		return -ENOMEM;
1525	}
1526	htbl->size = size;
1527	htbl->mask = (size - 1);
1528	htbl->buckets = buckets;
1529	RCU_INIT_POINTER(ntbl->htbl, htbl);
1530	htbl->ntbl = ntbl;
1531	atomic_set(&ntbl->entries, 0);
1532
1533	/* start garbage collection */
1534	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
1535			   arp_tbl.gc_interval);
1536
1537	return 0;
1538}
1539
1540static void neigh_hash_free_rcu(struct rcu_head *head)
1541{
1542	struct ipoib_neigh_hash *htbl = container_of(head,
1543						    struct ipoib_neigh_hash,
1544						    rcu);
1545	struct ipoib_neigh __rcu **buckets = htbl->buckets;
1546	struct ipoib_neigh_table *ntbl = htbl->ntbl;
1547
1548	kvfree(buckets);
1549	kfree(htbl);
1550	complete(&ntbl->deleted);
1551}
1552
1553void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
1554{
1555	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1556	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1557	struct ipoib_neigh_hash *htbl;
1558	unsigned long flags;
1559	int i;
1560
1561	/* remove all neighs connected to a given path or mcast */
1562	spin_lock_irqsave(&priv->lock, flags);
1563
1564	htbl = rcu_dereference_protected(ntbl->htbl,
1565					 lockdep_is_held(&priv->lock));
1566
1567	if (!htbl)
1568		goto out_unlock;
1569
1570	for (i = 0; i < htbl->size; i++) {
1571		struct ipoib_neigh *neigh;
1572		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1573
1574		while ((neigh = rcu_dereference_protected(*np,
1575							  lockdep_is_held(&priv->lock))) != NULL) {
1576			/* delete neighs belong to this parent */
1577			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
1578				rcu_assign_pointer(*np,
1579						   rcu_dereference_protected(neigh->hnext,
1580									     lockdep_is_held(&priv->lock)));
1581				/* remove from parent list */
1582				list_del_init(&neigh->list);
1583				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1584			} else {
1585				np = &neigh->hnext;
1586			}
1587
1588		}
1589	}
1590out_unlock:
1591	spin_unlock_irqrestore(&priv->lock, flags);
1592}
1593
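/*
 * Remove every neighbour from the hash table and then free the table
 * itself via RCU; if any entries were present, wait on ntbl.flushed until
 * the last reference has been dropped.
 */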
1594static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1595{
1596	struct ipoib_neigh_table *ntbl = &priv->ntbl;
1597	struct ipoib_neigh_hash *htbl;
1598	unsigned long flags;
1599	int i, wait_flushed = 0;
1600
1601	init_completion(&priv->ntbl.flushed);
1602	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1603
1604	spin_lock_irqsave(&priv->lock, flags);
1605
1606	htbl = rcu_dereference_protected(ntbl->htbl,
1607					lockdep_is_held(&priv->lock));
1608	if (!htbl)
1609		goto out_unlock;
1610
1611	wait_flushed = atomic_read(&priv->ntbl.entries);
1612	if (!wait_flushed)
1613		goto free_htbl;
1614
1615	for (i = 0; i < htbl->size; i++) {
1616		struct ipoib_neigh *neigh;
1617		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
1618
1619		while ((neigh = rcu_dereference_protected(*np,
1620				       lockdep_is_held(&priv->lock))) != NULL) {
1621			rcu_assign_pointer(*np,
1622					   rcu_dereference_protected(neigh->hnext,
1623								     lockdep_is_held(&priv->lock)));
1624			/* remove from path/mc list */
1625			list_del_init(&neigh->list);
1626			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
1627		}
1628	}
1629
1630free_htbl:
1631	rcu_assign_pointer(ntbl->htbl, NULL);
1632	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
1633
1634out_unlock:
1635	spin_unlock_irqrestore(&priv->lock, flags);
1636	if (wait_flushed)
1637		wait_for_completion(&priv->ntbl.flushed);
1638}
1639
1640static void ipoib_neigh_hash_uninit(struct net_device *dev)
1641{
1642	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1643
1644	ipoib_dbg(priv, "%s\n", __func__);
1645	init_completion(&priv->ntbl.deleted);
1646
1647	cancel_delayed_work_sync(&priv->neigh_reap_task);
1648
1649	ipoib_flush_neighs(priv);
1650
1651	wait_for_completion(&priv->ntbl.deleted);
1652}
1653
1654static void ipoib_napi_add(struct net_device *dev)
1655{
1656	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1657
1658	netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
1659	netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
1660}
1661
1662static void ipoib_napi_del(struct net_device *dev)
1663{
1664	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1665
1666	netif_napi_del(&priv->recv_napi);
1667	netif_napi_del(&priv->send_napi);
1668}
1669
1670static void ipoib_dev_uninit_default(struct net_device *dev)
1671{
1672	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1673
1674	ipoib_transport_dev_cleanup(dev);
1675
1676	ipoib_napi_del(dev);
1677
1678	ipoib_cm_dev_cleanup(dev);
1679
1680	kfree(priv->rx_ring);
1681	vfree(priv->tx_ring);
1682
1683	priv->rx_ring = NULL;
1684	priv->tx_ring = NULL;
1685}
1686
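/*
 * Default (non-offloaded) datapath init: add the NAPI contexts, allocate
 * the RX ring with kcalloc() and the larger TX ring with vzalloc(), call
 * ipoib_transport_dev_init(), and then encode the QP number into bytes
 * 1-3 of the device address.
 */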
1687static int ipoib_dev_init_default(struct net_device *dev)
1688{
1689	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1690
1691	ipoib_napi_add(dev);
1692
1693	/* Allocate RX/TX "rings" to hold queued skbs */
1694	priv->rx_ring =	kcalloc(ipoib_recvq_size,
1695				       sizeof(*priv->rx_ring),
1696				       GFP_KERNEL);
1697	if (!priv->rx_ring)
1698		goto out;
1699
1700	priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
1701					   sizeof(*priv->tx_ring)));
1702	if (!priv->tx_ring) {
1703		pr_warn("%s: failed to allocate TX ring (%d entries)\n",
1704			priv->ca->name, ipoib_sendq_size);
1705		goto out_rx_ring_cleanup;
1706	}
1707
1708	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
1709
1710	if (ipoib_transport_dev_init(dev, priv->ca)) {
1711		pr_warn("%s: ipoib_transport_dev_init failed\n",
1712			priv->ca->name);
1713		goto out_tx_ring_cleanup;
1714	}
1715
1716	/* after the QP is created, set the device address */
1717	priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
1718	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
1719	priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
1720
1721	return 0;
1722
1723out_tx_ring_cleanup:
1724	vfree(priv->tx_ring);
1725
1726out_rx_ring_cleanup:
1727	kfree(priv->rx_ring);
1728
1729out:
1730	ipoib_napi_del(dev);
1731	return -ENOMEM;
1732}
1733
1734static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
1735		       int cmd)
1736{
1737	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1738
1739	if (!priv->rn_ops->ndo_do_ioctl)
1740		return -EOPNOTSUPP;
1741
1742	return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
1743}
1744
1745static int ipoib_dev_init(struct net_device *dev)
1746{
1747	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1748	int ret = -ENOMEM;
1749
1750	priv->qp = NULL;
1751
1752	/*
1753	 * the various IPoIB tasks assume they will never race against
1754	 * themselves, so always use a single-threaded workqueue
1755	 */
1756	priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
1757	if (!priv->wq) {
1758		pr_warn("%s: failed to allocate device WQ\n", dev->name);
1759		goto out;
1760	}
1761
1762	/* create the PD, which is used for both the control and data paths */
1763	priv->pd = ib_alloc_pd(priv->ca, 0);
1764	if (IS_ERR(priv->pd)) {
1765		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
1766		goto clean_wq;
1767	}
1768
1769	ret = priv->rn_ops->ndo_init(dev);
1770	if (ret) {
1771		pr_warn("%s failed to init HW resource\n", dev->name);
1772		goto out_free_pd;
1773	}
1774
1775	ret = ipoib_neigh_hash_init(priv);
1776	if (ret) {
1777		pr_warn("%s failed to init neigh hash\n", dev->name);
1778		goto out_dev_uninit;
1779	}
1780
1781	if (dev->flags & IFF_UP) {
1782		if (ipoib_ib_dev_open(dev)) {
1783			pr_warn("%s failed to open device\n", dev->name);
1784			ret = -ENODEV;
1785			goto out_hash_uninit;
1786		}
1787	}
1788
1789	return 0;
1790
1791out_hash_uninit:
1792	ipoib_neigh_hash_uninit(dev);
1793
1794out_dev_uninit:
1795	ipoib_ib_dev_cleanup(dev);
1796
1797out_free_pd:
1798	if (priv->pd) {
1799		ib_dealloc_pd(priv->pd);
1800		priv->pd = NULL;
1801	}
1802
1803clean_wq:
1804	if (priv->wq) {
1805		destroy_workqueue(priv->wq);
1806		priv->wq = NULL;
1807	}
1808
1809out:
1810	return ret;
1811}
1812
1813/*
1814 * This must be called before doing an unregister_netdev on a parent device to
1815 * shutdown the IB event handler.
1816 */
1817static void ipoib_parent_unregister_pre(struct net_device *ndev)
1818{
1819	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1820
1821	/*
1822	 * ipoib_set_mac checks netif_running before pushing work; clearing
1823	 * running ensures it will not add more work.
1824	 */
1825	rtnl_lock();
1826	dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
1827	rtnl_unlock();
1828
1829	/* ipoib_event() cannot be running once this returns */
1830	ib_unregister_event_handler(&priv->event_handler);
1831
1832	/*
1833	 * Work on the queue grabs the rtnl lock, so this cannot be done while
1834	 * also holding it.
1835	 */
1836	flush_workqueue(ipoib_workqueue);
1837}
1838
1839static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
1840{
1841	priv->hca_caps = priv->ca->attrs.device_cap_flags;
1842
1843	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1844		priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1845
1846		if (priv->hca_caps & IB_DEVICE_UD_TSO)
1847			priv->dev->hw_features |= NETIF_F_TSO;
1848
1849		priv->dev->features |= priv->dev->hw_features;
1850	}
1851}
1852
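/*
 * Initialize a parent (port) interface: query the port attributes for the
 * maximum MTU, read P_Key index 0 and GID index 0 from the HCA, and store
 * the port GID in bytes 4..19 of the hardware address.
 */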
1853static int ipoib_parent_init(struct net_device *ndev)
1854{
1855	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1856	struct ib_port_attr attr;
1857	int result;
1858
1859	result = ib_query_port(priv->ca, priv->port, &attr);
1860	if (result) {
1861		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
1862			priv->port);
1863		return result;
1864	}
1865	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1866
1867	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
1868	if (result) {
1869		pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
1870			priv->ca->name, priv->port, result);
1871		return result;
1872	}
1873
1874	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
1875	if (result) {
1876		pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
1877			priv->ca->name, priv->port, result);
1878		return result;
1879	}
1880	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
1881	       sizeof(union ib_gid));
1882
1883	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
1884	priv->dev->dev_port = priv->port - 1;
1885	/* Let's set this one too for backwards compatibility. */
1886	priv->dev->dev_id = priv->port - 1;
1887
1888	return 0;
1889}
1890
1891static void ipoib_child_init(struct net_device *ndev)
1892{
1893	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1894	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1895
1896	priv->max_ib_mtu = ppriv->max_ib_mtu;
1897	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
1898	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
1899	memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
1900}
1901
1902static int ipoib_ndo_init(struct net_device *ndev)
1903{
1904	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
1905	int rc;
1906
1907	if (priv->parent) {
1908		ipoib_child_init(ndev);
1909	} else {
1910		rc = ipoib_parent_init(ndev);
1911		if (rc)
1912			return rc;
1913	}
1914
1915	/* MTU will be reset when mcast join happens */
1916	ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
1917	priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
1918	ndev->max_mtu = IPOIB_CM_MTU;
1919
1920	ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
1921
1922	/*
1923	 * Set the full membership bit, so that we join the right
1924	 * broadcast group, etc.
1925	 */
1926	priv->pkey |= 0x8000;
1927
1928	ndev->broadcast[8] = priv->pkey >> 8;
1929	ndev->broadcast[9] = priv->pkey & 0xff;
1930	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
1931
1932	ipoib_set_dev_features(priv);
1933
1934	rc = ipoib_dev_init(ndev);
1935	if (rc) {
1936		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
1937			priv->ca->name, priv->dev->name, priv->port, rc);
1938		return rc;
1939	}
1940
1941	if (priv->parent) {
1942		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1943
1944		dev_hold(priv->parent);
1945
1946		down_write(&ppriv->vlan_rwsem);
1947		list_add_tail(&priv->list, &ppriv->child_intfs);
1948		up_write(&ppriv->vlan_rwsem);
1949	}
1950
1951	return 0;
1952}
1953
1954static void ipoib_ndo_uninit(struct net_device *dev)
1955{
1956	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1957
1958	ASSERT_RTNL();
1959
1960	/*
1961	 * ipoib_remove_one guarantees the children are removed before the
1962	 * parent, and that is the only place where a parent can be removed.
1963	 */
1964	WARN_ON(!list_empty(&priv->child_intfs));
1965
1966	if (priv->parent) {
1967		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
1968
1969		down_write(&ppriv->vlan_rwsem);
1970		list_del(&priv->list);
1971		up_write(&ppriv->vlan_rwsem);
1972	}
1973
1974	ipoib_neigh_hash_uninit(dev);
1975
1976	ipoib_ib_dev_cleanup(dev);
1977
1978	/* no more work may be queued on priv->wq */
1979	if (priv->wq) {
1980		flush_workqueue(priv->wq);
1981		destroy_workqueue(priv->wq);
1982		priv->wq = NULL;
1983	}
1984
1985	if (priv->parent)
1986		dev_put(priv->parent);
1987}
1988
1989static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
1990{
1991	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1992
1993	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
1994}
1995
1996static int ipoib_get_vf_config(struct net_device *dev, int vf,
1997			       struct ifla_vf_info *ivf)
1998{
1999	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2000	int err;
2001
2002	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
2003	if (err)
2004		return err;
2005
2006	ivf->vf = vf;
2007	memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
2008
2009	return 0;
2010}
2011
2012static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
2013{
2014	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2015
2016	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
2017		return -EINVAL;
2018
2019	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
2020}
2021
2022static int ipoib_get_vf_stats(struct net_device *dev, int vf,
2023			      struct ifla_vf_stats *vf_stats)
2024{
2025	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2026
2027	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
2028}
2029
2030static const struct header_ops ipoib_header_ops = {
2031	.create	= ipoib_hard_header,
2032};
2033
2034static const struct net_device_ops ipoib_netdev_ops_pf = {
2035	.ndo_init		 = ipoib_ndo_init,
2036	.ndo_uninit		 = ipoib_ndo_uninit,
2037	.ndo_open		 = ipoib_open,
2038	.ndo_stop		 = ipoib_stop,
2039	.ndo_change_mtu		 = ipoib_change_mtu,
2040	.ndo_fix_features	 = ipoib_fix_features,
2041	.ndo_start_xmit		 = ipoib_start_xmit,
2042	.ndo_tx_timeout		 = ipoib_timeout,
2043	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2044	.ndo_get_iflink		 = ipoib_get_iflink,
2045	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
2046	.ndo_get_vf_config	 = ipoib_get_vf_config,
2047	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
2048	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
2049	.ndo_set_mac_address	 = ipoib_set_mac,
2050	.ndo_get_stats64	 = ipoib_get_stats,
2051	.ndo_do_ioctl		 = ipoib_ioctl,
2052};
2053
2054static const struct net_device_ops ipoib_netdev_ops_vf = {
2055	.ndo_init		 = ipoib_ndo_init,
2056	.ndo_uninit		 = ipoib_ndo_uninit,
2057	.ndo_open		 = ipoib_open,
2058	.ndo_stop		 = ipoib_stop,
2059	.ndo_change_mtu		 = ipoib_change_mtu,
2060	.ndo_fix_features	 = ipoib_fix_features,
2061	.ndo_start_xmit		 = ipoib_start_xmit,
2062	.ndo_tx_timeout		 = ipoib_timeout,
2063	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
2064	.ndo_get_iflink		 = ipoib_get_iflink,
2065	.ndo_get_stats64	 = ipoib_get_stats,
2066	.ndo_do_ioctl		 = ipoib_ioctl,
2067};
2068
2069void ipoib_setup_common(struct net_device *dev)
2070{
2071	dev->header_ops		 = &ipoib_header_ops;
2072
2073	ipoib_set_ethtool_ops(dev);
2074
2075	dev->watchdog_timeo	 = HZ;
2076
2077	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;
2078
2079	dev->hard_header_len	 = IPOIB_HARD_LEN;
2080	dev->addr_len		 = INFINIBAND_ALEN;
2081	dev->type		 = ARPHRD_INFINIBAND;
2082	dev->tx_queue_len	 = ipoib_sendq_size * 2;
2083	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
2084				    NETIF_F_HIGHDMA);
2085	netif_keep_dst(dev);
2086
2087	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
2088
2089	/*
2090	 * unregister_netdev always frees the netdev, we use this mode
2091	 * consistently to unify all the various unregister paths, including
2092	 * those connected to rtnl_link_ops which require it.
2093	 */
2094	dev->needs_free_netdev = true;
2095}
2096
2097static void ipoib_build_priv(struct net_device *dev)
2098{
2099	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2100
2101	priv->dev = dev;
2102	spin_lock_init(&priv->lock);
2103	init_rwsem(&priv->vlan_rwsem);
2104	mutex_init(&priv->mcast_mutex);
2105
2106	INIT_LIST_HEAD(&priv->path_list);
2107	INIT_LIST_HEAD(&priv->child_intfs);
2108	INIT_LIST_HEAD(&priv->dead_ahs);
2109	INIT_LIST_HEAD(&priv->multicast_list);
2110
2111	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
2112	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
2113	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
2114	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
2115	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
2116	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
2117	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
2118	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
2119}
2120
2121static const struct net_device_ops ipoib_netdev_default_pf = {
2122	.ndo_init		 = ipoib_dev_init_default,
2123	.ndo_uninit		 = ipoib_dev_uninit_default,
2124	.ndo_open		 = ipoib_ib_dev_open_default,
2125	.ndo_stop		 = ipoib_ib_dev_stop_default,
2126};
2127
2128static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
2129					     const char *name)
2130{
2131	struct net_device *dev;
2132
2133	dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2134				NET_NAME_UNKNOWN, ipoib_setup_common);
2135	if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
2136		return dev;
2137
2138	dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
2139			   ipoib_setup_common);
2140	if (!dev)
2141		return ERR_PTR(-ENOMEM);
2142	return dev;
2143}
2144
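/*
 * Bind an allocated netdev to an HCA port. rdma_init_netdev() lets the HCA
 * provide an offloaded rdma_netdev; if it returns -EOPNOTSUPP we fall back
 * to the software datapath (ipoib_netdev_default_pf and ipoib_send).
 * priv_destructor is deliberately deferred, see the comment below.
 */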
2145int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
2146		    struct net_device *dev)
2147{
2148	struct rdma_netdev *rn = netdev_priv(dev);
2149	struct ipoib_dev_priv *priv;
2150	int rc;
2151
2152	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2153	if (!priv)
2154		return -ENOMEM;
2155
2156	priv->ca = hca;
2157	priv->port = port;
2158
2159	rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
2160			      NET_NAME_UNKNOWN, ipoib_setup_common, dev);
2161	if (rc) {
2162		if (rc != -EOPNOTSUPP)
2163			goto out;
2164
2165		dev->netdev_ops = &ipoib_netdev_default_pf;
2166		rn->send = ipoib_send;
2167		rn->attach_mcast = ipoib_mcast_attach;
2168		rn->detach_mcast = ipoib_mcast_detach;
2169		rn->hca = hca;
2170	}
2171
2172	priv->rn_ops = dev->netdev_ops;
2173
2174	if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION)
2175		dev->netdev_ops	= &ipoib_netdev_ops_vf;
2176	else
2177		dev->netdev_ops	= &ipoib_netdev_ops_pf;
2178
2179	rn->clnt_priv = priv;
2180	/*
2181	 * Only the child register_netdev flows can handle priv_destructor
2182	 * being set, so we force it to NULL here and handle manually until it
2183	 * is safe to turn on.
2184	 */
2185	priv->next_priv_destructor = dev->priv_destructor;
2186	dev->priv_destructor = NULL;
2187
2188	ipoib_build_priv(dev);
2189
2190	return 0;
2191
2192out:
2193	kfree(priv);
2194	return rc;
2195}
2196
2197struct net_device *ipoib_intf_alloc(struct ib_device *hca, u8 port,
2198				    const char *name)
2199{
2200	struct net_device *dev;
2201	int rc;
2202
2203	dev = ipoib_alloc_netdev(hca, port, name);
2204	if (IS_ERR(dev))
2205		return dev;
2206
2207	rc = ipoib_intf_init(hca, port, name, dev);
2208	if (rc) {
2209		free_netdev(dev);
2210		return ERR_PTR(rc);
2211	}
2212
2213	/*
2214	 * Upon success the caller must either ensure ipoib_intf_free is
2215	 * called, or that register_netdevice succeeded and priv_destructor
2216	 * is set to ipoib_intf_free.
2217	 */
2218	return dev;
2219}
2220
2221void ipoib_intf_free(struct net_device *dev)
2222{
2223	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2224	struct rdma_netdev *rn = netdev_priv(dev);
2225
2226	dev->priv_destructor = priv->next_priv_destructor;
2227	if (dev->priv_destructor)
2228		dev->priv_destructor(dev);
2229
2230	/*
2231	 * There are some error flows around register_netdev failing that may
2232	 * attempt to call priv_destructor twice; prevent that from happening.
2233	 */
2234	dev->priv_destructor = NULL;
2235
2236	/* unregister/destroy is very complicated. Make bugs more obvious. */
2237	rn->clnt_priv = NULL;
2238
2239	kfree(priv);
2240}
2241
2242static ssize_t show_pkey(struct device *dev,
2243			 struct device_attribute *attr, char *buf)
2244{
2245	struct net_device *ndev = to_net_dev(dev);
2246	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2247
2248	return sprintf(buf, "0x%04x\n", priv->pkey);
2249}
2250static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2251
2252static ssize_t show_umcast(struct device *dev,
2253			   struct device_attribute *attr, char *buf)
2254{
2255	struct net_device *ndev = to_net_dev(dev);
2256	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2257
2258	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
2259}
2260
2261void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
2262{
2263	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
2264
2265	if (umcast_val > 0) {
2266		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2267		ipoib_warn(priv, "ignoring multicast groups joined directly "
2268				"by userspace\n");
2269	} else
2270		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
2271}
2272
2273static ssize_t set_umcast(struct device *dev,
2274			  struct device_attribute *attr,
2275			  const char *buf, size_t count)
2276{
2277	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
2278
2279	ipoib_set_umcast(to_net_dev(dev), umcast_val);
2280
2281	return count;
2282}
2283static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
2284
2285int ipoib_add_umcast_attr(struct net_device *dev)
2286{
2287	return device_create_file(&dev->dev, &dev_attr_umcast);
2288}
2289
2290static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
2291{
2292	struct ipoib_dev_priv *child_priv;
2293	struct net_device *netdev = priv->dev;
2294
2295	netif_addr_lock_bh(netdev);
2296
2297	memcpy(&priv->local_gid.global.interface_id,
2298	       &gid->global.interface_id,
2299	       sizeof(gid->global.interface_id));
2300	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
2301	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2302
2303	netif_addr_unlock_bh(netdev);
2304
2305	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
2306		down_read(&priv->vlan_rwsem);
2307		list_for_each_entry(child_priv, &priv->child_intfs, list)
2308			set_base_guid(child_priv, gid);
2309		up_read(&priv->vlan_rwsem);
2310	}
2311}
2312
2313static int ipoib_check_lladdr(struct net_device *dev,
2314			      struct sockaddr_storage *ss)
2315{
2316	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
2317	int ret = 0;
2318
2319	netif_addr_lock_bh(dev);
2320
2321	/* Make sure the QPN, reserved field and subnet prefix match the
2322	 * current lladdr; this also makes sure the lladdr is unicast.
2323	 */
2324	if (memcmp(dev->dev_addr, ss->__data,
2325		   4 + sizeof(gid->global.subnet_prefix)) ||
2326	    gid->global.interface_id == 0)
2327		ret = -EINVAL;
2328
2329	netif_addr_unlock_bh(dev);
2330
2331	return ret;
2332}
2333
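/*
 * Change the hardware address from user space. Only the GUID part of the
 * GID may change (ipoib_check_lladdr() insists the QPN, reserved bytes and
 * subnet prefix stay the same); the new GUID is propagated to all child
 * interfaces and a light flush is queued on the global ipoib workqueue.
 */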
2334static int ipoib_set_mac(struct net_device *dev, void *addr)
2335{
2336	struct ipoib_dev_priv *priv = ipoib_priv(dev);
2337	struct sockaddr_storage *ss = addr;
2338	int ret;
2339
2340	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
2341		return -EBUSY;
2342
2343	ret = ipoib_check_lladdr(dev, ss);
2344	if (ret)
2345		return ret;
2346
2347	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
2348
2349	queue_work(ipoib_workqueue, &priv->flush_light);
2350
2351	return 0;
2352}
2353
2354static ssize_t create_child(struct device *dev,
2355			    struct device_attribute *attr,
2356			    const char *buf, size_t count)
2357{
2358	int pkey;
2359	int ret;
2360
2361	if (sscanf(buf, "%i", &pkey) != 1)
2362		return -EINVAL;
2363
2364	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
2365		return -EINVAL;
2366
2367	ret = ipoib_vlan_add(to_net_dev(dev), pkey);
2368
2369	return ret ? ret : count;
2370}
2371static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
2372
2373static ssize_t delete_child(struct device *dev,
2374			    struct device_attribute *attr,
2375			    const char *buf, size_t count)
2376{
2377	int pkey;
2378	int ret;
2379
2380	if (sscanf(buf, "%i", &pkey) != 1)
2381		return -EINVAL;
2382
2383	if (pkey < 0 || pkey > 0xffff)
2384		return -EINVAL;
2385
2386	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
2387
2388	return ret ? ret : count;
2389
2390}
2391static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
2392
2393int ipoib_add_pkey_attr(struct net_device *dev)
2394{
2395	return device_create_file(&dev->dev, &dev_attr_pkey);
2396}
2397
2398/*
2399 * We erroneously exposed the iface's port number in the dev_id
2400 * sysfs field long after dev_port was introduced for that purpose[1],
2401 * and we need to stop everyone from relying on that.
2402 * Let's overload the show routine for the dev_id file here
2403 * to gently bring the issue up.
2404 *
2405 * [1] https://www.spinics.net/lists/netdev/msg272123.html
2406 */
2407static ssize_t dev_id_show(struct device *dev,
2408			   struct device_attribute *attr, char *buf)
2409{
2410	struct net_device *ndev = to_net_dev(dev);
2411
2412	/*
2413	 * ndev->dev_port will be equal to 0 in old kernels prior to commit
2414	 * 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
2415	 * port numbers"). Zero was chosen as a special case so that user space
2416	 * applications fall back to querying dev_id to check whether it has a
2417	 * different value or not.
2418	 *
2419	 * Don't print warning in such scenario.
2420	 *
2421	 * https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
2422	 */
2423	if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
2424		netdev_info_once(ndev,
2425			"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
2426			current->comm);
2427
2428	return sprintf(buf, "%#x\n", ndev->dev_id);
2429}
2430static DEVICE_ATTR_RO(dev_id);
2431
2432static int ipoib_intercept_dev_id_attr(struct net_device *dev)
2433{
2434	device_remove_file(&dev->dev, &dev_attr_dev_id);
2435	return device_create_file(&dev->dev, &dev_attr_dev_id);
2436}
2437
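/*
 * Create and register one "ibN" interface for an HCA port: allocate the
 * interface, register the IB event handler before register_netdev() so
 * P_Key changes are not missed, register the netdev, and then create the
 * extra sysfs attributes (CM mode, pkey, umcast, create_child,
 * delete_child). Any sysfs failure unwinds through unregister_netdev().
 */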
2438static struct net_device *ipoib_add_port(const char *format,
2439					 struct ib_device *hca, u8 port)
2440{
2441	struct rtnl_link_ops *ops = ipoib_get_link_ops();
2442	struct rdma_netdev_alloc_params params;
2443	struct ipoib_dev_priv *priv;
2444	struct net_device *ndev;
2445	int result;
2446
2447	ndev = ipoib_intf_alloc(hca, port, format);
2448	if (IS_ERR(ndev)) {
2449		pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
2450			PTR_ERR(ndev));
2451		return ndev;
2452	}
2453	priv = ipoib_priv(ndev);
2454
2455	INIT_IB_EVENT_HANDLER(&priv->event_handler,
2456			      priv->ca, ipoib_event);
2457	ib_register_event_handler(&priv->event_handler);
2458
2459	/* call event handler to ensure pkey in sync */
2460	queue_work(ipoib_workqueue, &priv->flush_heavy);
2461
2462	result = register_netdev(ndev);
2463	if (result) {
2464		pr_warn("%s: couldn't register ipoib port %d; error %d\n",
2465			hca->name, port, result);
2466
2467		ipoib_parent_unregister_pre(ndev);
2468		ipoib_intf_free(ndev);
2469		free_netdev(ndev);
2470
2471		return ERR_PTR(result);
2472	}
2473
2474	if (hca->ops.rdma_netdev_get_params) {
2475		int rc = hca->ops.rdma_netdev_get_params(hca, port,
2476						     RDMA_NETDEV_IPOIB,
2477						     &params);
2478
2479		if (!rc && ops->priv_size < params.sizeof_priv)
2480			ops->priv_size = params.sizeof_priv;
2481	}
2482	/*
2483	 * We cannot set priv_destructor before register_netdev because we
2484	 * need priv to be always valid during the error flow to execute
2485	 * ipoib_parent_unregister_pre(). Instead handle it manually and only
2486	 * enter priv_destructor mode once we are completely registered.
2487	 */
2488	ndev->priv_destructor = ipoib_intf_free;
2489
2490	if (ipoib_intercept_dev_id_attr(ndev))
2491		goto sysfs_failed;
2492	if (ipoib_cm_add_mode_attr(ndev))
2493		goto sysfs_failed;
2494	if (ipoib_add_pkey_attr(ndev))
2495		goto sysfs_failed;
2496	if (ipoib_add_umcast_attr(ndev))
2497		goto sysfs_failed;
2498	if (device_create_file(&ndev->dev, &dev_attr_create_child))
2499		goto sysfs_failed;
2500	if (device_create_file(&ndev->dev, &dev_attr_delete_child))
2501		goto sysfs_failed;
2502
2503	return ndev;
2504
2505sysfs_failed:
2506	ipoib_parent_unregister_pre(ndev);
2507	unregister_netdev(ndev);
2508	return ERR_PTR(-ENOMEM);
2509}
2510
2511static int ipoib_add_one(struct ib_device *device)
2512{
2513	struct list_head *dev_list;
2514	struct net_device *dev;
2515	struct ipoib_dev_priv *priv;
2516	unsigned int p;
2517	int count = 0;
2518
2519	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
2520	if (!dev_list)
2521		return -ENOMEM;
2522
2523	INIT_LIST_HEAD(dev_list);
2524
2525	rdma_for_each_port (device, p) {
2526		if (!rdma_protocol_ib(device, p))
2527			continue;
2528		dev = ipoib_add_port("ib%d", device, p);
2529		if (!IS_ERR(dev)) {
2530			priv = ipoib_priv(dev);
2531			list_add_tail(&priv->list, dev_list);
2532			count++;
2533		}
2534	}
2535
2536	if (!count) {
2537		kfree(dev_list);
2538		return -EOPNOTSUPP;
2539	}
2540
2541	ib_set_client_data(device, &ipoib_client, dev_list);
	return 0;
2542}
2543
2544static void ipoib_remove_one(struct ib_device *device, void *client_data)
2545{
2546	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
2547	struct list_head *dev_list = client_data;
2548
2549	if (!dev_list)
2550		return;
2551
2552	list_for_each_entry_safe(priv, tmp, dev_list, list) {
2553		LIST_HEAD(head);
2554		ipoib_parent_unregister_pre(priv->dev);
2555
2556		rtnl_lock();
2557
2558		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
2559					 list)
2560			unregister_netdevice_queue(cpriv->dev, &head);
2561		unregister_netdevice_queue(priv->dev, &head);
2562		unregister_netdevice_many(&head);
2563
2564		rtnl_unlock();
2565	}
2566
2567	kfree(dev_list);
2568}
2569
2570#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2571static struct notifier_block ipoib_netdev_notifier = {
2572	.notifier_call = ipoib_netdev_event,
2573};
2574#endif
2575
2576static int __init ipoib_init_module(void)
2577{
2578	int ret;
2579
2580	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
2581	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
2582	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
2583
2584	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
2585	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
2586	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2587#ifdef CONFIG_INFINIBAND_IPOIB_CM
2588	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2589	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2590#endif
2591
2592	/*
2593	 * When copying small received packets, we only copy from the
2594	 * linear data part of the SKB, so we rely on this condition.
2595	 */
2596	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
2597
2598	ipoib_register_debugfs();
2599
2600	/*
2601	 * We create a global workqueue here that is used for all flush
2602	 * operations.  However, if you attempt to flush a workqueue
2603	 * from a task on that same workqueue, it deadlocks the system.
2604	 * We want to be able to flush the tasks associated with a
2605	 * specific net device, so we also create a workqueue for each
2606	 * netdevice.  We queue up the tasks for that device only on
2607	 * its private workqueue, and we only queue up flush events
2608	 * on our global flush workqueue.  This avoids the deadlocks.
2609	 */
2610	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
2611	if (!ipoib_workqueue) {
2612		ret = -ENOMEM;
2613		goto err_fs;
2614	}
2615
2616	ib_sa_register_client(&ipoib_sa_client);
2617
2618	ret = ib_register_client(&ipoib_client);
2619	if (ret)
2620		goto err_sa;
2621
2622	ret = ipoib_netlink_init();
2623	if (ret)
2624		goto err_client;
2625
2626#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2627	register_netdevice_notifier(&ipoib_netdev_notifier);
2628#endif
2629	return 0;
2630
2631err_client:
2632	ib_unregister_client(&ipoib_client);
2633
2634err_sa:
2635	ib_sa_unregister_client(&ipoib_sa_client);
2636	destroy_workqueue(ipoib_workqueue);
2637
2638err_fs:
2639	ipoib_unregister_debugfs();
2640
2641	return ret;
2642}
2643
2644static void __exit ipoib_cleanup_module(void)
2645{
2646#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
2647	unregister_netdevice_notifier(&ipoib_netdev_notifier);
2648#endif
2649	ipoib_netlink_fini();
2650	ib_unregister_client(&ipoib_client);
2651	ib_sa_unregister_client(&ipoib_sa_client);
2652	ipoib_unregister_debugfs();
2653	destroy_workqueue(ipoib_workqueue);
2654}
2655
2656module_init(ipoib_init_module);
2657module_exit(ipoib_cleanup_module);